1 /*
   2  * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciInlineKlass.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "oops/accessDecorators.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/graphKit.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/movenode.hpp"
  36 #include "opto/narrowptrnode.hpp"
  37 #include "opto/opcodes.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/type.hpp"
  41 #include "utilities/globalDefinitions.hpp"
  42 
  43 // Clones the inline type to handle control flow merges involving multiple inline types.
  44 // The inputs are replaced by PhiNodes to represent the merged values for the given region.
  45 InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_init) {
  46   InlineTypeNode* vt = clone_if_required(gvn, map);
  47   const Type* t = Type::get_const_type(inline_klass());
  48   gvn->set_type(vt, t);
  49   vt->as_InlineType()->set_type(t);
  50 
  51   // Create a PhiNode for merging the oop values
  52   PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  53   gvn->set_type(oop, t);
  54   gvn->record_for_igvn(oop);
  55   vt->set_oop(*gvn, oop);
  56 
  57   // Create a PhiNode for merging the is_buffered values
  58   t = Type::get_const_basic_type(T_BOOLEAN);
  59   Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  60   gvn->set_type(is_buffered_node, t);
  61   gvn->record_for_igvn(is_buffered_node);
  62   vt->set_req(IsBuffered, is_buffered_node);
  63 
  64   // Create a PhiNode for merging the is_init values
  65   Node* is_init_node;
  66   if (is_init) {
  67     is_init_node = gvn->intcon(1);
  68   } else {
  69     t = Type::get_const_basic_type(T_BOOLEAN);
  70     is_init_node = PhiNode::make(region, vt->get_is_init(), t);
  71     gvn->set_type(is_init_node, t);
  72     gvn->record_for_igvn(is_init_node);
  73   }
  74   vt->set_req(IsInit, is_init_node);
  75 
  76   // Create a PhiNode each for merging the field values
  77   for (uint i = 0; i < vt->field_count(); ++i) {
  78     ciType* type = vt->field_type(i);
  79     Node*  value = vt->field_value(i);
  80     // We limit scalarization for inline types with circular fields and can therefore observe nodes
  81     // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
  82     // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
  83     bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
  84     if (type->is_inlinetype() && no_circularity) {
  85       // Handle inline type fields recursively
  86       value = value->as_InlineType()->clone_with_phis(gvn, region, map);
  87     } else {
  88       t = Type::get_const_type(type);
  89       value = PhiNode::make(region, value, t);
  90       gvn->set_type(value, t);
  91       gvn->record_for_igvn(value);
  92     }
  93     vt->set_field_value(i, value);
  94   }
  95   gvn->record_for_igvn(vt);
  96   return vt;
  97 }
  98 
  99 // Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
 100 // for the given region (see InlineTypeNode::clone_with_phis).
 101 bool InlineTypeNode::has_phi_inputs(Node* region) {
 102   // Check oop input
 103   bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
 104 #ifdef ASSERT
 105   if (result) {
 106     // Check all field value inputs for consistency
 107     for (uint i = Values; i < field_count(); ++i) {
 108       Node* n = in(i);
 109       if (n->is_InlineType()) {
 110         assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
 111       } else {
 112         assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
 113       }
 114     }
 115   }
 116 #endif
 117   return result;
 118 }
 119 
 120 // Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
 121 InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
 122   assert(inline_klass() == other->inline_klass(), "Merging incompatible types");
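       // The 'pnum' argument selects which Phi input (i.e. which predecessor of the merge region)
       // receives the values coming from 'other'.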
 123 
 124   // Merge oop inputs
 125   PhiNode* phi = get_oop()->as_Phi();
 126   phi->set_req(pnum, other->get_oop());
 127   if (transform) {
 128     set_oop(*gvn, gvn->transform(phi));
 129   }
 130 
 131   // Merge is_buffered inputs
 132   phi = get_is_buffered()->as_Phi();
 133   phi->set_req(pnum, other->get_is_buffered());
 134   if (transform) {
 135     set_req(IsBuffered, gvn->transform(phi));
 136   }
 137 
 138   // Merge is_init inputs
 139   Node* is_init = get_is_init();
 140   if (is_init->is_Phi()) {
 141     phi = is_init->as_Phi();
 142     phi->set_req(pnum, other->get_is_init());
 143     if (transform) {
 144       set_req(IsInit, gvn->transform(phi));
 145     }
 146   } else {
 147     assert(is_init->find_int_con(0) == 1, "only with a non null inline type");
 148   }
 149 
 150   // Merge field values
 151   for (uint i = 0; i < field_count(); ++i) {
 152     Node* val1 =        field_value(i);
 153     Node* val2 = other->field_value(i);
 154     if (val1->is_InlineType()) {
 155       if (val2->is_Phi()) {
 156         val2 = gvn->transform(val2);
 157       }
 158       val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
 159     } else {
 160       assert(val1->is_Phi(), "must be a phi node");
 161       val1->set_req(pnum, val2);
 162     }
 163     if (transform) {
 164       set_field_value(i, gvn->transform(val1));
 165     }
 166   }
 167   return this;
 168 }
 169 
 170 // Adds a new merge path to an inline type node with phi inputs
 171 void InlineTypeNode::add_new_path(Node* region) {
 172   assert(has_phi_inputs(region), "must have phi inputs");
 173 
 174   PhiNode* phi = get_oop()->as_Phi();
 175   phi->add_req(nullptr);
 176   assert(phi->req() == region->req(), "must be same size as region");
 177 
 178   phi = get_is_buffered()->as_Phi();
 179   phi->add_req(nullptr);
 180   assert(phi->req() == region->req(), "must be same size as region");
 181 
 182   phi = get_is_init()->as_Phi();
 183   phi->add_req(nullptr);
 184   assert(phi->req() == region->req(), "must be same size as region");
 185 
 186   for (uint i = 0; i < field_count(); ++i) {
 187     Node* val = field_value(i);
 188     if (val->is_InlineType()) {
 189       val->as_InlineType()->add_new_path(region);
 190     } else {
 191       val->as_Phi()->add_req(nullptr);
 192       assert(val->req() == region->req(), "must be same size as region");
 193     }
 194   }
 195 }
 196 
 197 Node* InlineTypeNode::field_value(uint index) const {
 198   assert(index < field_count(), "index out of bounds");
 199   return in(Values + index);
 200 }
 201 
 202 // Get the value of the field at the given offset.
 203 // If 'recursive' is true, flat inline type fields will be resolved recursively.
 204 Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
 205   // Find the declared field which contains the field we are looking for
 206   int index = inline_klass()->field_index_by_offset(offset);
 207   Node* value = field_value(index);
 208   assert(value != nullptr, "field value not found");
 209 
 210   if (!recursive || !field_is_flat(index)) {
 211     assert(offset == field_offset(index), "offset mismatch");
 212     return value;
 213   }
 214 
 215   // Flat inline type field
 216   InlineTypeNode* vt = value->as_InlineType();
 217   if (offset == field_null_marker_offset(index)) {
 218     return vt->get_is_init();
 219   } else {
 220     int sub_offset = offset - field_offset(index); // Offset of the flattened field inside the declared field
 221     sub_offset += vt->inline_klass()->payload_offset(); // Add header size
 222     return vt->field_value_by_offset(sub_offset, recursive);
 223   }
 224 }
 225 
 226 void InlineTypeNode::set_field_value(uint index, Node* value) {
 227   assert(index < field_count(), "index out of bounds");
 228   set_req(Values + index, value);
 229 }
 230 
 231 void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
 232   set_field_value(field_index(offset), value);
 233 }
 234 
 235 int InlineTypeNode::field_offset(uint index) const {
 236   assert(index < field_count(), "index out of bounds");
 237   return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
 238 }
 239 
 240 uint InlineTypeNode::field_index(int offset) const {
 241   uint i = 0;
 242   for (; i < field_count() && field_offset(i) != offset; i++) { }
 243   assert(i < field_count(), "field not found");
 244   return i;
 245 }
 246 
 247 ciType* InlineTypeNode::field_type(uint index) const {
 248   assert(index < field_count(), "index out of bounds");
 249   return inline_klass()->declared_nonstatic_field_at(index)->type();
 250 }
 251 
 252 bool InlineTypeNode::field_is_flat(uint index) const {
 253   assert(index < field_count(), "index out of bounds");
 254   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 255   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 256   return field->is_flat();
 257 }
 258 
 259 bool InlineTypeNode::field_is_null_free(uint index) const {
 260   assert(index < field_count(), "index out of bounds");
 261   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 262   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 263   return field->is_null_free();
 264 }
 265 
 266 bool InlineTypeNode::field_is_volatile(uint index) const {
 267   assert(index < field_count(), "index out of bounds");
 268   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 269   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 270   return field->is_volatile();
 271 }
 272 
 273 int InlineTypeNode::field_null_marker_offset(uint index) const {
 274   assert(index < field_count(), "index out of bounds");
 275   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 276   assert(field->is_flat(), "must be an inline type");
 277   return field->null_marker_offset();
 278 }
 279 
 280 uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) {
 281   uint cnt = 0;
 282   for (uint i = 0; i < field_count(); ++i) {
 283     Node* value = field_value(i);
 284     if (field_is_flat(i)) {
 285       InlineTypeNode* vt = value->as_InlineType();
 286       cnt += vt->add_fields_to_safepoint(worklist, sfpt);
 287       if (!field_is_null_free(i)) {
 288         // The null marker of a flat field is added right after we scalarize that field
 289         sfpt->add_req(vt->get_is_init());
 290         cnt++;
 291       }
 292       continue;
 293     }
 294     if (value->is_InlineType()) {
 295       // Add inline type to the worklist to process later
 296       worklist.push(value);
 297     }
 298     sfpt->add_req(value);
 299     cnt++;
 300   }
 301   return cnt;
 302 }
 303 
 304 void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
 305   JVMState* jvms = sfpt->jvms();
 306   assert(jvms != nullptr, "missing JVMS");
 307   uint first_ind = (sfpt->req() - jvms->scloff());
 308 
 309   // Iterate over the inline type fields in order of increasing offset and add the
 310   // field values to the safepoint. Nullable inline types have an IsInit field that
 311   // needs to be checked before using the field values.
 312   sfpt->add_req(get_is_init());
 313   uint nfields = add_fields_to_safepoint(worklist, sfpt);
 314   jvms->set_endoff(sfpt->req());
 315   // Replace safepoint edge by SafePointScalarObjectNode
 316   SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
 317                                                                   nullptr,
 318                                                                   first_ind,
 319                                                                   sfpt->jvms()->depth(),
 320                                                                   nfields);
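       // The scalar object node records where the scalarized field values start in the debug info
       // ('first_ind', relative to scloff) and how many there are ('nfields'), so deoptimization can
       // rematerialize the value object from them.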
 321   sobj->init_req(0, igvn->C->root());
 322   sobj = igvn->transform(sobj)->as_SafePointScalarObject();
 323   igvn->rehash_node_delayed(sfpt);
 324   for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
 325     Node* debug = sfpt->in(i);
 326     if (debug != nullptr && debug->uncast() == this) {
 327       sfpt->set_req(i, sobj);
 328     }
 329   }
 330 }
 331 
 332 void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
 333   // If the inline type has a constant or loaded oop, use the oop instead of scalarization
 334   // in the safepoint to avoid keeping field loads live just for the debug info.
 335   Node* oop = get_oop();
 336   bool use_oop = false;
 337   if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
 338     Unique_Node_List worklist;
 339     VectorSet visited;
 340     visited.set(oop->_idx);
 341     worklist.push(oop);
 342     use_oop = true;
 343     while (worklist.size() > 0 && use_oop) {
 344       Node* n = worklist.pop();
 345       for (uint i = 1; i < n->req(); i++) {
 346         Node* in = n->in(i);
 347         if (in->is_Phi() && !visited.test_set(in->_idx)) {
 348           worklist.push(in);
 349         } else if (!(in->is_Con() || in->is_Parm())) {
 350           use_oop = false;
 351           break;
 352         }
 353       }
 354     }
 355   } else {
 356     use_oop = allow_oop && is_allocated(igvn) &&
 357               (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
 358   }
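       // At this point, 'use_oop' is only true if the buffered oop is trivially available (a constant,
       // parameter, load, or a phi merging only constants and parameters), in which case the debug info
       // can reference the oop directly instead of the scalarized field values.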
 359 
 360   ResourceMark rm;
 361   Unique_Node_List safepoints;
 362   Unique_Node_List vt_worklist;
 363   Unique_Node_List worklist;
 364   worklist.push(this);
 365   while (worklist.size() > 0) {
 366     Node* n = worklist.pop();
 367     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 368       Node* use = n->fast_out(i);
 369       if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
 370         safepoints.push(use);
 371       } else if (use->is_ConstraintCast()) {
 372         worklist.push(use);
 373       }
 374     }
 375   }
 376 
 377   // Process all safepoint uses and scalarize inline type
 378   while (safepoints.size() > 0) {
 379     SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
 380     if (use_oop) {
 381       for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
 382         Node* debug = sfpt->in(i);
 383         if (debug != nullptr && debug->uncast() == this) {
 384           sfpt->set_req(i, get_oop());
 385         }
 386       }
 387       igvn->rehash_node_delayed(sfpt);
 388     } else {
 389       make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
 390     }
 391   }
 392   // Now scalarize non-flat fields
 393   for (uint i = 0; i < vt_worklist.size(); ++i) {
 394     InlineTypeNode* vt = vt_worklist.at(i)->as_InlineType();
 395     vt->make_scalar_in_safepoints(igvn);
 396   }
 397   if (outcnt() == 0) {
 398     igvn->record_for_igvn(this);
 399   }
 400 }
 401 
 402 // We limit scalarization for inline types with circular fields and can therefore observe nodes
 403 // of the same type but with different scalarization depth during GVN. This method adjusts the
 404 // scalarization depth to avoid inconsistencies during merging.
 405 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
 406   if (!kit->C->has_circular_inline_type()) {
 407     return this;
 408   }
 409   GrowableArray<ciType*> visited;
 410   visited.push(inline_klass());
 411   return adjust_scalarization_depth_impl(kit, visited);
 412 }
 413 
 414 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
 415   InlineTypeNode* val = this;
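       // Walk the fields: a value object field that would re-enter an already visited type is cut off by
       // buffering it, while an oop-typed field of a not-yet-visited value class is re-scalarized.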
 416   for (uint i = 0; i < field_count(); ++i) {
 417     Node* value = field_value(i);
 418     Node* new_value = value;
 419     ciType* ft = field_type(i);
 420     if (value->is_InlineType()) {
 421       if (!field_is_flat(i) && visited.contains(ft)) {
 422         new_value = value->as_InlineType()->buffer(kit)->get_oop();
 423       } else {
 424         int old_len = visited.length();
 425         visited.push(ft);
 426         new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
 427         visited.trunc_to(old_len);
 428       }
 429     } else if (ft->is_inlinetype() && !visited.contains(ft)) {
 430       int old_len = visited.length();
 431       visited.push(ft);
 432       new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 433       visited.trunc_to(old_len);
 434     }
 435     if (value != new_value) {
 436       if (val == this) {
 437         val = clone_if_required(&kit->gvn(), kit->map());
 438       }
 439       val->set_field_value(i, new_value);
 440     }
 441   }
 442   return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
 443 }
 444 
 445 void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
 446   // Initialize the inline type by loading its field values from
 447   // memory and adding the values as input edges to the node.
 448   ciInlineKlass* vk = inline_klass();
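       // Field offsets are relative to the start of the heap-allocated object, while 'ptr' points at the
       // payload, so subtract the payload offset when computing each field's address.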
 449   for (uint i = 0; i < field_count(); ++i) {
 450     int field_off = field_offset(i) - vk->payload_offset();
 451     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 452     Node* value = nullptr;
 453     ciType* ft = field_type(i);
 454     bool field_null_free = field_is_null_free(i);
 455     if (field_is_flat(i)) {
 456       // Recursively load the flat inline type field
 457       ciInlineKlass* fvk = ft->as_inline_klass();
 458       // Atomic if nullable or not LooselyConsistentValue
 459       bool atomic = !field_null_free || fvk->must_be_atomic();
 460 
 461       int old_len = visited.length();
 462       visited.push(ft);
 463       value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
 464                                   field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
 465       visited.trunc_to(old_len);
 466     } else {
 467       // Load field value from memory
 468       BasicType bt = type2field[ft->basic_type()];
 469       assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
 470       const Type* val_type = Type::get_const_type(ft);
 471       if (trust_null_free_oop && field_null_free) {
 472         val_type = val_type->join_speculative(TypePtr::NOTNULL);
 473       }
 474       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 475       value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
 476       // Loading a non-flattened inline type from memory
 477       if (visited.contains(ft)) {
 478         kit->C->set_has_circular_inline_type(true);
 479       } else if (ft->is_inlinetype()) {
 480         int old_len = visited.length();
 481         visited.push(ft);
 482         value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 483         visited.trunc_to(old_len);
 484       }
 485     }
 486     set_field_value(i, value);
 487   }
 488 }
 489 
 490 // Get a field value from the payload by shifting it according to the offset
 491 static Node* get_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, BasicType val_bt, int offset) {
 492   // Shift to the right position in the long value
 493   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
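       // For example, extracting a T_SHORT at payload offset 2 from a T_LONG payload shifts right by
       // 2 * 8 = 16 bits and then narrows the 32-bit result back down to 16 bits below.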
 494   Node* value = nullptr;
 495   Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
 496   if (bt == T_LONG) {
 497     value = gvn->transform(new URShiftLNode(payload, shift_val));
 498     value = gvn->transform(new ConvL2INode(value));
 499   } else {
 500     value = gvn->transform(new URShiftINode(payload, shift_val));
 501   }
 502 
 503   if (val_bt == T_INT || val_bt == T_OBJECT || val_bt == T_ARRAY) {
 504     return value;
 505   } else {
 506     // Make sure to zero unused bits in the 32-bit value
 507     return Compile::narrow_value(val_bt, value, nullptr, gvn, true);
 508   }
 509 }
 510 
 511 // Convert a payload value to field values
 512 void InlineTypeNode::convert_from_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, bool trust_null_free_oop) {
 513   PhaseGVN* gvn = &kit->gvn();
 514   ciInlineKlass* vk = inline_klass();
 515   Node* value = nullptr;
 516   if (!null_free) {
 517     // Get the null marker
 518     value = get_payload_value(gvn, payload, bt, T_BOOLEAN, holder_offset + vk->null_marker_offset_in_payload());
 519     set_req(IsInit, value);
 520   }
 521   // Iterate over the fields and get their values from the payload
 522   for (uint i = 0; i < field_count(); ++i) {
 523     ciType* ft = field_type(i);
 524     bool field_null_free = field_is_null_free(i);
 525     int offset = holder_offset + field_offset(i) - vk->payload_offset();
 526     if (field_is_flat(i)) {
 527       InlineTypeNode* vt = make_uninitialized(*gvn, ft->as_inline_klass(), field_null_free);
 528       vt->convert_from_payload(kit, bt, payload, offset, field_null_free, trust_null_free_oop && field_null_free);
 529       value = gvn->transform(vt);
 530     } else {
 531       value = get_payload_value(gvn, payload, bt, ft->basic_type(), offset);
 532       if (!ft->is_primitive_type()) {
 533         // Narrow oop field
 534         assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
 535         const Type* val_type = Type::get_const_type(ft);
 536         if (trust_null_free_oop && field_null_free) {
 537           val_type = val_type->join_speculative(TypePtr::NOTNULL);
 538         }
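             // Reinterpret the raw 32-bit payload bits as a narrow oop (CastI2N), decode them to a full
             // oop, and pin the result with an unconditional-dependency cast so it keeps the type
             // computed above.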
 539         value = gvn->transform(new CastI2NNode(kit->control(), value));
 540         value = gvn->transform(new DecodeNNode(value, val_type->make_narrowoop()));
 541         value = gvn->transform(new CastPPNode(kit->control(), value, val_type, ConstraintCastNode::UnconditionalDependency));
 542 
 543         // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
 544         // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
 545         // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
 546         // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.
 547 
 548         if (ft->is_inlinetype()) {
 549           GrowableArray<ciType*> visited;
 550           value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 551         }
 552       }
 553     }
 554     set_field_value(i, value);
 555   }
 556 }
 557 
 558 // Set a field value in the payload by shifting it according to the offset
 559 static Node* set_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, Node* value, BasicType val_bt, int offset) {
 560   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
 561 
 562   // Make sure to zero unused bits in the 32-bit value
 563   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
 564     value = gvn->transform(new AndINode(value, gvn->intcon(0xFF)));
 565   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
 566     value = gvn->transform(new AndINode(value, gvn->intcon(0xFFFF)));
 567   } else if (val_bt == T_FLOAT) {
 568     value = gvn->transform(new MoveF2INode(value));
 569   } else {
 570     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
 571   }
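       // Zeroing the unused bits above guarantees that OR-ing the shifted value into the payload below
       // cannot clobber neighboring fields.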
 572 
 573   Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
 574   if (bt == T_LONG) {
 575     // Convert to long and clear the sign-extended upper bits (the backend will fold this and emit a zero-extending i2l)
 576     value = gvn->transform(new ConvI2LNode(value));
 577     value = gvn->transform(new AndLNode(value, gvn->longcon(0xFFFFFFFF)));
 578 
 579     Node* shift_value = gvn->transform(new LShiftLNode(value, shift_val));
 580     payload = new OrLNode(shift_value, payload);
 581   } else {
 582     Node* shift_value = gvn->transform(new LShiftINode(value, shift_val));
 583     payload = new OrINode(shift_value, payload);
 584   }
 585   return gvn->transform(payload);
 586 }
 587 
 588 // Convert the field values to a payload value of type 'bt'
 589 Node* InlineTypeNode::convert_to_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset, int& oop_off_1, int& oop_off_2) const {
 590   PhaseGVN* gvn = &kit->gvn();
 591   Node* value = nullptr;
 592   if (!null_free) {
 593     // Set the null marker
 594     value = get_is_init();
 595     payload = set_payload_value(gvn, payload, bt, value, T_BOOLEAN, null_marker_offset);
 596   }
 597   // Iterate over the fields and add their values to the payload
 598   for (uint i = 0; i < field_count(); ++i) {
 599     value = field_value(i);
 600     int inner_offset = field_offset(i) - inline_klass()->payload_offset();
 601     int offset = holder_offset + inner_offset;
 602     if (field_is_flat(i)) {
 603       null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
 604       payload = value->as_InlineType()->convert_to_payload(kit, bt, payload, offset, field_is_null_free(i), null_marker_offset, oop_off_1, oop_off_2);
 605     } else {
 606       ciType* ft = field_type(i);
 607       BasicType field_bt = ft->basic_type();
 608       if (!ft->is_primitive_type()) {
 609         // Narrow oop field
 610         assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
 611         assert(inner_offset != -1, "sanity");
 612         if (oop_off_1 == -1) {
 613           oop_off_1 = inner_offset;
 614         } else {
 615           assert(oop_off_2 == -1, "already set");
 616           oop_off_2 = inner_offset;
 617         }
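             // Buffer the value if it is still scalarized, then compress it (EncodeP) and reinterpret
             // the narrow oop bits as a 32-bit integer so they can be OR-ed into the payload like any
             // other field.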
 618         const Type* val_type = Type::get_const_type(ft)->make_narrowoop();
 619         if (value->is_InlineType()) {
 620           PreserveReexecuteState preexecs(kit);
 621           kit->jvms()->set_should_reexecute(true);
 622           value = value->as_InlineType()->buffer(kit, false);
 623         }
 624         value = gvn->transform(new EncodePNode(value, val_type));
 625         value = gvn->transform(new CastP2XNode(kit->control(), value));
 626         value = gvn->transform(new ConvL2INode(value));
 627         field_bt = T_INT;
 628       }
 629       payload = set_payload_value(gvn, payload, bt, value, field_bt, offset);
 630     }
 631   }
 632   return payload;
 633 }
 634 
 635 void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) const {
 636   ciInlineKlass* vk = inline_klass();
 637   bool do_atomic = atomic;
 638   // With immutable memory, a non-atomic store and an atomic store are the same
 639   if (immutable_memory) {
 640     do_atomic = false;
 641   }
 642   // If there is only one flattened field, a non-atomic store and an atomic store are the same
 643   if (vk->is_naturally_atomic(null_free)) {
 644     do_atomic = false;
 645   }
 646 
 647   if (!do_atomic) {
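         // Non-atomic case: store the null marker separately (if nullable) and then store the fields
         // one by one.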
 648     if (!null_free) {
 649       int nm_offset = vk->null_marker_offset_in_payload();
 650       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
 651       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 652       kit->access_store_at(base, nm_ptr, nm_ptr_type, get_is_init(), TypeInt::BOOL, T_BOOLEAN, decorators);
 653     }
 654     store(kit, base, ptr, immutable_memory, decorators);
 655     return;
 656   }
 657 
 658   // Convert to a payload value <= 64-bit and write atomically.
 659   // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
 660   // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
 661   // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
 662   // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
 663   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
 664   BasicType store_bt = vk->atomic_size_to_basic_type(null_free);
 665   Node* payload = (store_bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
 666   int oop_off_1 = -1;
 667   int oop_off_2 = -1;
 668   payload = convert_to_payload(kit, store_bt, payload, 0, null_free, vk->null_marker_offset_in_payload(), oop_off_1, oop_off_2);
 669   if (!UseG1GC || oop_off_1 == -1) {
 670     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
 671     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
 672     // ZGC does not support compressed oops, so the payload could hold at most a single 64-bit oop, which would be naturally atomic and written by a "normal" oop store instead.
 673     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
 674     const Type* val_type = Type::get_const_basic_type(store_bt);
 675     kit->insert_mem_bar(Op_MemBarCPUOrder);
 676     kit->access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, val_type, store_bt, decorators | C2_MISMATCHED, true, this);
 677     kit->insert_mem_bar(Op_MemBarCPUOrder);
 678   } else {
 679     // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
 680     assert(UseG1GC, "Unexpected GC");
 681     assert(store_bt == T_LONG, "Unexpected payload type");
 682     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
 683     Node* oop_offset = (oop_off_2 == -1) ? kit->intcon(oop_off_1) : nullptr;
 684     kit->insert_mem_bar(Op_MemBarCPUOrder);
 685     Node* mem = kit->reset_memory();
 686     kit->set_all_memory(mem);
 687     Node* st = kit->gvn().transform(new StoreLSpecialNode(kit->control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
 688     kit->set_memory(st, TypeRawPtr::BOTTOM);
 689     kit->insert_mem_bar(Op_MemBarCPUOrder);
 690   }
 691 }
 692 
 693 void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) const {
 694   PhaseGVN& gvn = kit->gvn();
 695   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
 696   kit->C->set_flat_accesses();
 697   ciInlineKlass* vk = inline_klass();
 698   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
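       // The element layout is only known at runtime, so dispatch on it below: nullable atomic,
       // null-free atomic, or null-free non-atomic flat layout, storing with the matching parameters on
       // each path and merging control, memory and I/O through the region and phis.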
 699 
 700   RegionNode* region = new RegionNode(4);
 701   gvn.set_type(region, Type::CONTROL);
 702   kit->record_for_igvn(region);
 703 
 704   Node* input_memory_state = kit->reset_memory();
 705   kit->set_all_memory(input_memory_state);
 706 
 707   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
 708   gvn.set_type(mem, Type::MEMORY);
 709   kit->record_for_igvn(mem);
 710 
 711   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
 712   gvn.set_type(io, Type::ABIO);
 713   kit->record_for_igvn(io);
 714 
 715   Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
 716   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
 717 
 718   // Nullable
 719   kit->set_control(kit->IfFalse(iff_null_free));
 720   if (!kit->stopped()) {
 721     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
 722     kit->set_all_memory(input_memory_state);
 723     Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
 724     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 725     store_flat(kit, cast, ptr, true, false, false, decorators);
 726 
 727     region->init_req(1, kit->control());
 728     mem->set_req(1, kit->reset_memory());
 729     io->set_req(1, kit->i_o());
 730   }
 731 
 732   // Null-free
 733   kit->set_control(kit->IfTrue(iff_null_free));
 734   if (!kit->stopped()) {
 735     kit->set_all_memory(input_memory_state);
 736 
 737     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
 738     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
 739 
 740     // Atomic
 741     kit->set_control(kit->IfTrue(iff_atomic));
 742     if (!kit->stopped()) {
 743       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
 744       kit->set_all_memory(input_memory_state);
 745       Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
 746       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 747       store_flat(kit, cast, ptr, true, false, true, decorators);
 748 
 749       region->init_req(2, kit->control());
 750       mem->set_req(2, kit->reset_memory());
 751       io->set_req(2, kit->i_o());
 752     }
 753 
 754     // Non-atomic
 755     kit->set_control(kit->IfFalse(iff_atomic));
 756     if (!kit->stopped()) {
 757       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
 758       kit->set_all_memory(input_memory_state);
 759       Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
 760       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 761       store_flat(kit, cast, ptr, false, false, true, decorators);
 762 
 763       region->init_req(3, kit->control());
 764       mem->set_req(3, kit->reset_memory());
 765       io->set_req(3, kit->i_o());
 766     }
 767   }
 768 
 769   kit->set_control(gvn.transform(region));
 770   kit->set_all_memory(gvn.transform(mem));
 771   kit->set_i_o(gvn.transform(io));
 772 }
 773 
 774 void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
 775   // Write field values to memory
 776   ciInlineKlass* vk = inline_klass();
 777   for (uint i = 0; i < field_count(); ++i) {
 778     int field_off = field_offset(i) - vk->payload_offset();
 779     Node* field_val = field_value(i);
 780     bool field_null_free = field_is_null_free(i);
 781     ciType* ft = field_type(i);
 782     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 783     if (field_is_flat(i)) {
 784       // Recursively store the flat inline type field
 785       ciInlineKlass* fvk = ft->as_inline_klass();
 786       // Atomic if nullable or not LooselyConsistentValue
 787       bool atomic = !field_null_free || fvk->must_be_atomic();
 788 
 789       field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
 790     } else {
 791       // Store field value to memory
 792       BasicType bt = type2field[ft->basic_type()];
 793       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 794       const Type* val_type = Type::get_const_type(ft);
 795       kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
 796     }
 797   }
 798 }
 799 
 800 InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
 801   if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
 802     // Already buffered
 803     return this;
 804   }
 805 
 806   // Check if inline type is already buffered
 807   Node* not_buffered_ctl = kit->top();
 808   Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
 809   if (not_buffered_ctl->is_top()) {
 810     // Already buffered
 811     InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 812     vt->set_is_buffered(kit->gvn());
 813     vt = kit->gvn().transform(vt)->as_InlineType();
 814     if (safe_for_replace) {
 815       kit->replace_in_map(this, vt);
 816     }
 817     return vt;
 818   }
 819   Node* buffered_ctl = kit->control();
 820   kit->set_control(not_buffered_ctl);
 821 
 822   // Inline type is not buffered, check if it is null.
 823   Node* null_ctl = kit->top();
 824   kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
 825   bool null_free = null_ctl->is_top();
 826 
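       // Three paths are merged below: the oop was already buffered, the value is null, or a new buffer
       // is allocated and initialized with the field values.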
 827   RegionNode* region = new RegionNode(4);
 828   PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));
 829 
 830   // InlineType is already buffered
 831   region->init_req(1, buffered_ctl);
 832   oop->init_req(1, not_null_oop);
 833 
 834   // InlineType is null
 835   region->init_req(2, null_ctl);
 836   oop->init_req(2, kit->gvn().zerocon(T_OBJECT));
 837 
 838   PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
 839   PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);
 840 
 841   if (!kit->stopped()) {
 842     assert(!is_allocated(&kit->gvn()), "already buffered");
 843     PreserveJVMState pjvms(kit);
 844     ciInlineKlass* vk = inline_klass();
 845     // Allocate and initialize buffer, re-execute on deoptimization.
 846     kit->jvms()->set_bci(kit->bci());
 847     kit->jvms()->set_should_reexecute(true);
 848     kit->kill_dead_locals();
 849     Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
 850     Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
 851     Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
 852     store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);
 853 
 854     // Do not let stores that initialize this buffer be reordered with a subsequent
 855     // store that would make this buffer accessible by other threads.
 856     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
 857     assert(alloc != nullptr, "must have an allocation node");
 858     kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 859     oop->init_req(3, alloc_oop);
 860     region->init_req(3, kit->control());
 861     io    ->init_req(3, kit->i_o());
 862     mem   ->init_req(3, kit->merged_memory());
 863   }
 864 
 865   // Update GraphKit
 866   kit->set_control(kit->gvn().transform(region));
 867   kit->set_i_o(kit->gvn().transform(io));
 868   kit->set_all_memory(kit->gvn().transform(mem));
 869   kit->record_for_igvn(region);
 870   kit->record_for_igvn(oop);
 871   kit->record_for_igvn(io);
 872   kit->record_for_igvn(mem);
 873 
 874   // Use cloned InlineTypeNode to propagate oop from now on
 875   Node* res_oop = kit->gvn().transform(oop);
 876   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 877   vt->set_oop(kit->gvn(), res_oop);
 878   vt->set_is_buffered(kit->gvn());
 879   vt = kit->gvn().transform(vt)->as_InlineType();
 880   if (safe_for_replace) {
 881     kit->replace_in_map(this, vt);
 882   }
 883   // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
 884   // Make sure it gets a chance to remove this allocation.
 885   kit->C->set_has_split_ifs(true);
 886   return vt;
 887 }
 888 
 889 bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
 890   if (phase->find_int_con(get_is_buffered(), 0) == 1) {
 891     return true;
 892   }
 893   Node* oop = get_oop();
 894   const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
 895   return !oop_type->maybe_null();
 896 }
 897 
 898 static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
 899   ProjNode* pn = call->proj_out_or_null(proj_idx);
 900   if (pn != nullptr) {
 901     C->gvn_replace_by(pn, value);
 902     C->initial_gvn()->hash_delete(pn);
 903     pn->set_req(0, C->top());
 904   }
 905   proj_idx += type2size[bt];
 906 }
 907 
 908 // When a call returns multiple values, it has several result
 909 // projections, one per field. Replacing the result of the call by an
 910 // inline type node (after late inlining) requires that for each result
 911 // projection, we find the corresponding inline type field.
 912 void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
 913   uint proj_idx = TypeFunc::Parms;
 914   // Replace oop projection
 915   replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
 916   // Replace field projections
 917   replace_field_projs(C, call, proj_idx);
 918   // Replace is_init projection
 919   replace_proj(C, call, proj_idx, get_is_init(), T_BOOLEAN);
 920   assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
 921 }
 922 
 923 void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
 924   for (uint i = 0; i < field_count(); ++i) {
 925     Node* value = field_value(i);
 926     if (field_is_flat(i)) {
 927       InlineTypeNode* vt = value->as_InlineType();
 928       // Replace field projections for flat field
 929       vt->replace_field_projs(C, call, proj_idx);
 930       if (!field_is_null_free(i)) {
 931         // Replace is_init projection for nullable field
 932         replace_proj(C, call, proj_idx, vt->get_is_init(), T_BOOLEAN);
 933       }
 934       continue;
 935     }
 936     // Replace projection for field value
 937     replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
 938   }
 939 }
 940 
 941 Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
 942   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
 943   for (uint i = 0; i < field_count(); i++) {
 944      Node* value = field_value(i);
 945      if (field_is_flat(i)) {
 946        // Flat inline type field
 947        vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
 948      } else if (value->is_InlineType()) {
 949        // Non-flat inline type field
 950        vt->set_field_value(i, value->as_InlineType()->buffer(kit));
 951      }
 952   }
 953   vt = kit->gvn().transform(vt)->as_InlineType();
 954   kit->replace_in_map(this, vt);
 955   return vt;
 956 }
 957 
 958 // Replace a buffer allocation by a dominating allocation
 959 static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
 960   // Remove initializing stores and GC barriers
 961   for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
 962     Node* use = res->fast_out(i);
 963     if (use->is_AddP()) {
 964       for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
 965         Node* store = use->fast_out(j)->isa_Store();
 966         if (store != nullptr) {
 967           igvn->rehash_node_delayed(store);
 968           igvn->replace_in_uses(store, store->in(MemNode::Memory));
 969         }
 970       }
 971     } else if (use->Opcode() == Op_CastP2X) {
 972       if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
 973         // The G1 pre-barrier uses a CastP2X both for the pointer of the object
 974         // we store into, as well as the value we are storing. Skip if this is a
 975         // barrier for storing 'res' into another object.
 976         continue;
 977       }
 978       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 979       bs->eliminate_gc_barrier(igvn, use);
 980       --i; --imax;
 981     }
 982   }
 983   igvn->replace_node(res, dom);
 984 }
 985 
 986 Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 987   Node* oop = get_oop();
 988   Node* is_buffered = get_is_buffered();
 989 
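       // If the oop input is itself an inline type that is known to be non-null, simply adopt its oop
       // and field inputs and mark this node as buffered and initialized.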
 990   if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
 991     InlineTypeNode* vtptr = oop->as_InlineType();
 992     set_oop(*phase, vtptr->get_oop());
 993     set_is_buffered(*phase);
 994     set_is_init(*phase);
 995     for (uint i = Values; i < vtptr->req(); ++i) {
 996       set_req(i, vtptr->in(i));
 997     }
 998     return this;
 999   }
1000 
1001   // Use base oop if fields are loaded from memory, don't do so if base is the CheckCastPP of an
1002   // allocation because the only case we load from a naked CheckCastPP is when we exit a
1003   // constructor of an inline type and we want to relinquish the larval oop there. This has a
1004   // couple of benefits:
1005   // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
1006   // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge
1007   //   when we try to clone a value object.
1008   // - The buffering, if needed, is delayed until it is required. This new allocation, since it is
1009   //   created from an InlineTypeNode, is recognized as not having a unique identity and in the
1010   //   future, we can move them around more freely such as hoisting out of loops. This is not true
1011   //   for the old allocation since larval value objects do have unique identities.
1012   Node* base = is_loaded(phase);
1013   if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) {
1014     if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
1015       set_oop(*phase, base);
1016       set_is_buffered(*phase);
1017       return this;
1018     }
1019   }
1020 
1021   if (can_reshape) {
1022     PhaseIterGVN* igvn = phase->is_IterGVN();
1023     if (is_allocated(phase)) {
1024       // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
1025       // they will be removed anyway and changing the memory chain will confuse other optimizations.
1026       // This can happen with late inlining when we first allocate an inline type argument
1027       // but later decide to inline the call after the callee code also triggered allocation.
1028       for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1029         AllocateNode* alloc = fast_out(i)->isa_Allocate();
1030         if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1031           // Found a re-allocation
1032           Node* res = alloc->result_cast();
1033           if (res != nullptr && res->is_CheckCastPP()) {
1034             // Replace allocation by oop and unlink AllocateNode
1035             replace_allocation(igvn, res, oop);
1036             igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
1037             --i; --imax;
1038           }
1039         }
1040       }
1041     }
1042   }
1043 
1044   return nullptr;
1045 }
1046 
1047 InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
1048   // Create a new InlineTypeNode with uninitialized values and nullptr oop
1049   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
1050   vt->set_is_buffered(gvn, false);
1051   vt->set_is_init(gvn);
1052   return vt;
1053 }
1054 
1055 InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
1056   GrowableArray<ciType*> visited;
1057   visited.push(vk);
1058   return make_all_zero_impl(gvn, vk, visited);
1059 }
1060 
1061 InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1062   // Create a new InlineTypeNode initialized with all zero
1063   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
1064   vt->set_is_buffered(gvn, false);
1065   vt->set_is_init(gvn);
1066   for (uint i = 0; i < vt->field_count(); ++i) {
1067     ciType* ft = vt->field_type(i);
1068     Node* value = gvn.zerocon(ft->basic_type());
1069     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1070       gvn.C->set_has_circular_inline_type(true);
1071     } else if (ft->is_inlinetype()) {
1072       int old_len = visited.length();
1073       visited.push(ft);
1074       ciInlineKlass* vk = ft->as_inline_klass();
1075       if (vt->field_is_null_free(i)) {
1076         value = make_all_zero_impl(gvn, vk, visited);
1077       } else {
1078         value = make_null_impl(gvn, vk, visited);
1079       }
1080       visited.trunc_to(old_len);
1081     }
1082     vt->set_field_value(i, value);
1083   }
1084   vt = gvn.transform(vt)->as_InlineType();
1085   assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
1086   return vt;
1087 }
1088 
1089 bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
1090   const TypeInt* tinit = gvn->type(get_is_init())->isa_int();
1091   if (tinit == nullptr || !tinit->is_con(1)) {
1092     return false; // May be null
1093   }
1094   for (uint i = 0; i < field_count(); ++i) {
1095     Node* value = field_value(i);
1096     if (field_is_null_free(i)) {
1097       // Null-free value class field must have the all-zero value. If 'flat' is set,
1098       // reject non-flat fields because they need to be initialized with an oop to a buffer.
1099       if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
1100         return false;
1101       }
1102       continue;
1103     } else if (value->is_InlineType()) {
1104       // Nullable value class field must be null
1105       tinit = gvn->type(value->as_InlineType()->get_is_init())->isa_int();
1106       if (tinit != nullptr && tinit->is_con(0)) {
1107         continue;
1108       }
1109       return false;
1110     } else if (!gvn->type(value)->is_zero_type()) {
1111       return false;
1112     }
1113   }
1114   return true;
1115 }
1116 
1117 InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
1118   GrowableArray<ciType*> visited;
1119   visited.push(vk);
1120   return make_from_oop_impl(kit, oop, vk, visited);
1121 }
1122 
1123 InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1124   PhaseGVN& gvn = kit->gvn();
1125 
1126   // Create and initialize an InlineTypeNode by loading all field
1127   // values from a heap-allocated version and also save the oop.
1128   InlineTypeNode* vt = nullptr;
1129 
1130   if (oop->isa_InlineType()) {
1131     return oop->as_InlineType();
1132   }
1133 
1134   if (gvn.type(oop)->maybe_null()) {
1135     // Add a null check because the oop may be null
1136     Node* null_ctl = kit->top();
1137     Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
1138     if (kit->stopped()) {
1139       // Constant null
1140       kit->set_control(null_ctl);
1141       vt = make_null_impl(gvn, vk, visited);
1142       kit->record_for_igvn(vt);
1143       return vt;
1144     }
1145     vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
1146     vt->set_is_buffered(gvn);
1147     vt->set_is_init(gvn);
1148     Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
1149     vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1150 
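         // If the oop can still be null at runtime, merge the value loaded on the non-null path with the
         // canonical null value through a region so the result covers both paths.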
1151     if (null_ctl != kit->top()) {
1152       InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
1153       Node* region = new RegionNode(3);
1154       region->init_req(1, kit->control());
1155       region->init_req(2, null_ctl);
1156       vt = vt->clone_with_phis(&gvn, region, kit->map());
1157       vt->merge_with(&gvn, null_vt, 2, true);
1158       vt->set_oop(gvn, oop);
1159       kit->set_control(gvn.transform(region));
1160     }
1161   } else {
1162     // Oop can never be null
1163     vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
1164     Node* init_ctl = kit->control();
1165     vt->set_is_buffered(gvn);
1166     vt->set_is_init(gvn);
1167     Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
1168     vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1169 // TODO 8284443
1170 //    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
1171 //           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
1172   }
1173   assert(vt->is_allocated(&gvn), "inline type should be allocated");
1174   kit->record_for_igvn(vt);
1175   return gvn.transform(vt)->as_InlineType();
1176 }
1177 
1178 InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
1179                                                bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
1180   GrowableArray<ciType*> visited;
1181   visited.push(vk);
1182   return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
1183 }
1184 
1185 // Implementation of 'make_from_flat' that also takes a list of visited types to limit scalarization for circular fields
1186 InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
1187                                                     bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
1188   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1189   PhaseGVN& gvn = kit->gvn();
1190   bool do_atomic = atomic;
1191   // With immutable memory, a non-atomic load and an atomic load are the same
1192   if (immutable_memory) {
1193     do_atomic = false;
1194   }
1195   // If there is only one flattened field, a non-atomic load and an atomic load are the same
1196   if (vk->is_naturally_atomic(null_free)) {
1197     do_atomic = false;
1198   }
1199 
1200   if (!do_atomic) {
1201     InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1202     if (!null_free) {
1203       int nm_offset = vk->null_marker_offset_in_payload();
1204       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
1205       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
1206       Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
1207       vt->set_req(IsInit, nm_value);
1208     }
1209 
1210     vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
1211     return gvn.transform(vt)->as_InlineType();
1212   }
1213 
1214   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
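       // Atomic case: load the whole payload with a single mismatched, control-dependent load and split
       // it back into the field values (and the null marker, if nullable) in 'convert_from_payload'.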
1215   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1216   BasicType load_bt = vk->atomic_size_to_basic_type(null_free);
1217   decorators |= C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD;
1218   const Type* val_type = Type::get_const_basic_type(load_bt);
1219   kit->insert_mem_bar(Op_MemBarCPUOrder);
1220   Node* payload = kit->access_load_at(base, ptr, TypeRawPtr::BOTTOM, val_type, load_bt, decorators, kit->control());
1221   kit->insert_mem_bar(Op_MemBarCPUOrder);
1222   vt->convert_from_payload(kit, load_bt, kit->gvn().transform(payload), 0, null_free, trust_null_free_oop);
1223   return gvn.transform(vt)->as_InlineType();
1224 }
1225 
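     // Load an inline type element from a flat array. The concrete layout (nullable, null-free atomic, or
     // null-free non-atomic) is only known at runtime, so emit a check for each possible layout and merge the results.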
1226 InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
1227   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
1228   PhaseGVN& gvn = kit->gvn();
1229   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
1230   kit->C->set_flat_accesses();
1231   InlineTypeNode* vt_nullable = nullptr;
1232   InlineTypeNode* vt_null_free = nullptr;
1233   InlineTypeNode* vt_non_atomic = nullptr;
1234 
1235   RegionNode* region = new RegionNode(4);
1236   gvn.set_type(region, Type::CONTROL);
1237   kit->record_for_igvn(region);
1238 
1239   Node* input_memory_state = kit->reset_memory();
1240   kit->set_all_memory(input_memory_state);
1241 
1242   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
1243   gvn.set_type(mem, Type::MEMORY);
1244   kit->record_for_igvn(mem);
1245 
1246   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
1247   gvn.set_type(io, Type::ABIO);
1248   kit->record_for_igvn(io);
1249 
1250   Node* bol_null_free = kit->null_free_array_test(base); // This sets control, so evaluate it before the kit->control() argument below (C++ argument evaluation order is unspecified)
1251   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
1252 
1253   // Nullable
1254   kit->set_control(kit->IfFalse(iff_null_free));
1255   if (!kit->stopped()) {
1256     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
1257     kit->set_all_memory(input_memory_state);
1258     Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
1259     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1260     vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);
1261 
1262     region->init_req(1, kit->control());
1263     mem->set_req(1, kit->reset_memory());
1264     io->set_req(1, kit->i_o());
1265   }
1266 
1267   // Null-free
1268   kit->set_control(kit->IfTrue(iff_null_free));
1269   if (!kit->stopped()) {
1270     kit->set_all_memory(input_memory_state);
1271 
1272     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
1273     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
1274 
1275     // Atomic
1276     kit->set_control(kit->IfTrue(iff_atomic));
1277     if (!kit->stopped()) {
1278       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
1279       kit->set_all_memory(input_memory_state);
1280       Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
1281       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1282       vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);
1283 
1284       region->init_req(2, kit->control());
1285       mem->set_req(2, kit->reset_memory());
1286       io->set_req(2, kit->i_o());
1287     }
1288 
1289     // Non-Atomic
1290     kit->set_control(kit->IfFalse(iff_atomic));
1291     if (!kit->stopped()) {
1292       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
1293       kit->set_all_memory(input_memory_state);
1294       Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
1295       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1296       vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);
1297 
1298       region->init_req(3, kit->control());
1299       mem->set_req(3, kit->reset_memory());
1300       io->set_req(3, kit->i_o());
1301     }
1302   }
1303 
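       // If at most one layout path is reachable, there is nothing to merge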
1304   InlineTypeNode* vt = nullptr;
1305   if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
1306     // All paths are dead
1307     vt = make_null(gvn, vk);
1308   } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
1309     vt = vt_non_atomic;
1310   } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
1311     vt = vt_null_free;
1312   } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
1313     vt = vt_nullable;
1314   }
1315   if (vt != nullptr) {
1316     kit->set_control(kit->gvn().transform(region));
1317     kit->set_all_memory(kit->gvn().transform(mem));
1318     kit->set_i_o(kit->gvn().transform(io));
1319     return vt;
1320   }
1321 
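       // Multiple layout paths are reachable: merge the loaded values with phis over the region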
1322   InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
1323   vt = zero->clone_with_phis(&gvn, region);
1324   if (vt_nullable != nullptr) {
1325     vt = vt->merge_with(&gvn, vt_nullable, 1, false);
1326   }
1327   if (vt_null_free != nullptr) {
1328     vt = vt->merge_with(&gvn, vt_null_free, 2, false);
1329   }
1330   if (vt_non_atomic != nullptr) {
1331     vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
1332   }
1333 
1334   kit->set_control(kit->gvn().transform(region));
1335   kit->set_all_memory(kit->gvn().transform(mem));
1336   kit->set_i_o(kit->gvn().transform(io));
1337   return gvn.transform(vt)->as_InlineType();
1338 }
1339 
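     // Reconstruct an inline type from the scalarized values of a MultiNode (method entry parameters or call inputs/results)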
1340 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1341   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1342   if (!in) {
1343     // Keep track of the oop. The returned inline type might already be buffered.
1344     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1345     vt->set_oop(kit->gvn(), oop);
1346   }
1347   GrowableArray<ciType*> visited;
1348   visited.push(vk);
1349   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1350   return kit->gvn().transform(vt)->as_InlineType();
1351 }
1352 
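     // Check if all field values were loaded from memory of the same base oop at offsets matching this inline type's
     // layout. Returns the common base oop if so, nullptr otherwise.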
1353 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
1354   if (vk == nullptr) {
1355     vk = inline_klass();
1356   }
1357   for (uint i = 0; i < field_count(); ++i) {
1358     int offset = holder_offset + field_offset(i);
1359     Node* value = field_value(i);
1360     if (value->is_InlineType()) {
1361       InlineTypeNode* vt = value->as_InlineType();
1362       if (vt->type()->inline_klass()->is_empty()) {
1363         continue;
1364       } else if (field_is_flat(i)) {
1365         // Check inline type field load recursively
1366         base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1367         if (base == nullptr) {
1368           return nullptr;
1369         }
1370         continue;
1371       } else {
1372         value = vt->get_oop();
1373         if (value->Opcode() == Op_CastPP) {
1374           // Skip CastPP
1375           value = value->in(1);
1376         }
1377       }
1378     }
1379     if (value->isa_DecodeN()) {
1380       // Skip DecodeN
1381       value = value->in(1);
1382     }
1383     if (value->isa_Load()) {
1384       // Check if the base and offset of the field load match the inline type layout
1385       intptr_t loffset = 0;
1386       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1387       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1388         return nullptr;
1389       } else if (base == nullptr) {
1390         // Set base and check if pointer type matches
1391         base = lbase;
1392         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1393         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1394           return nullptr;
1395         }
1396       }
1397     } else {
1398       return nullptr;
1399     }
1400   }
1401   return base;
1402 }
1403 
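     // Constant klass pointer of the inline type with the lowest bit set. It is passed in place of an oop to indicate
     // that the value is transferred in scalarized (non-buffered) form.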
1404 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1405   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1406   intptr_t bits = tk->get_con();
1407   set_nth_bit(bits, 0);
1408   return gvn.longcon((jlong)bits);
1409 }
1410 
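     // Pass the scalarized field values of this inline type as inputs of the call/return node 'n', starting at 'base_input'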
1411 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1412   if (!null_free && in) {
1413     n->init_req(base_input++, get_is_init());
1414   }
1415   for (uint i = 0; i < field_count(); i++) {
1416     Node* arg = field_value(i);
1417     if (field_is_flat(i)) {
1418       // Flat inline type field
1419       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1420       if (!field_is_null_free(i)) {
1421         assert(field_null_marker_offset(i) != -1, "inconsistency");
1422         n->init_req(base_input++, arg->as_InlineType()->get_is_init());
1423       }
1424     } else {
1425       if (arg->is_InlineType()) {
1426         // Non-flat inline type field
1427         InlineTypeNode* vt = arg->as_InlineType();
1428         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1429         arg = vt->buffer(kit);
1430       }
1431       // Initialize call/return arguments
1432       n->init_req(base_input++, arg);
1433       if (field_type(i)->size() == 2) {
1434         n->init_req(base_input++, kit->top());
1435       }
1436     }
1437   }
1438   // The last argument is used to pass IsInit information to compiled code and is not required here.
1439   if (!null_free && !in) {
1440     n->init_req(base_input++, kit->top());
1441   }
1442 }
1443 
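     // Set the field values of this inline type from the scalarized inputs or projections of 'multi' (method entry or
     // call), starting at 'base_input'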
1444 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
1445   PhaseGVN& gvn = kit->gvn();
1446   Node* is_init = nullptr;
1447   if (!null_free) {
1448     // Nullable inline type
1449     if (in) {
1450       // Set IsInit field
1451       if (multi->is_Start()) {
1452         is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1453       } else {
1454         is_init = multi->as_Call()->in(base_input);
1455       }
1456       set_req(IsInit, is_init);
1457       base_input++;
1458     }
1459     // Add a null check to make subsequent loads dependent on the null check
1460     assert(null_check_region == nullptr, "already set");
1461     if (is_init == nullptr) {
1462       // Will only be initialized below; use a dummy node for now
1463       is_init = new Node(1);
1464       is_init->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1465       gvn.set_type_bottom(is_init);
1466     }
1467     Node* null_ctrl = kit->top();
1468     kit->null_check_common(is_init, T_INT, false, &null_ctrl);
1469     Node* non_null_ctrl = kit->control();
1470     null_check_region = new RegionNode(3);
1471     null_check_region->init_req(1, non_null_ctrl);
1472     null_check_region->init_req(2, null_ctrl);
1473     null_check_region = gvn.transform(null_check_region);
1474     kit->set_control(null_check_region);
1475   }
1476 
1477   for (uint i = 0; i < field_count(); ++i) {
1478     ciType* type = field_type(i);
1479     Node* parm = nullptr;
1480     if (field_is_flat(i)) {
1481       // Flat inline type field
1482       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
1483       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1484       if (!field_is_null_free(i)) {
1485         assert(field_null_marker_offset(i) != -1, "inconsistency");
1486         Node* is_init = nullptr;
1487         if (multi->is_Start()) {
1488           is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1489         } else if (in) {
1490           is_init = multi->as_Call()->in(base_input);
1491         } else {
1492           is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1493         }
1494         vt->set_req(IsInit, is_init);
1495         base_input++;
1496       }
1497       parm = gvn.transform(vt);
1498     } else {
1499       if (multi->is_Start()) {
1500         assert(in, "return from start?");
1501         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1502       } else if (in) {
1503         parm = multi->as_Call()->in(base_input);
1504       } else {
1505         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1506       }
1507       bool null_free = field_is_null_free(i);
1508       // Non-flat inline type field
1509       if (type->is_inlinetype()) {
1510         if (null_check_region != nullptr) {
1511           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1512           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1513           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1514           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1515             parm = parm->as_InlineType()->get_oop();
1516           }
1517           // The holder is nullable: set the field to nullptr if the holder is nullptr to avoid loading from uninitialized memory
1518           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1519           parm->set_req(2, kit->zerocon(T_OBJECT));
1520           parm = gvn.transform(parm);
1521           null_free = false;
1522         }
1523         if (visited.contains(type)) {
1524           kit->C->set_has_circular_inline_type(true);
1525         } else if (!parm->is_InlineType()) {
1526           int old_len = visited.length();
1527           visited.push(type);
1528           if (null_free) {
1529             parm = kit->cast_not_null(parm);
1530           }
1531           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1532           visited.trunc_to(old_len);
1533         }
1534       }
1535       base_input += type->size();
1536     }
1537     assert(parm != nullptr, "should never be null");
1538     assert(field_value(i) == nullptr, "already set");
1539     set_field_value(i, parm);
1540     gvn.record_for_igvn(parm);
1541   }
1542   // The last argument is used to pass IsInit information to compiled code
1543   if (!null_free && !in) {
1544     Node* cmp = is_init->raw_out(0);
1545     is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1546     set_req(IsInit, is_init);
1547     gvn.hash_delete(cmp);
1548     cmp->set_req(1, is_init);
1549     gvn.hash_find_insert(cmp);
1550     gvn.record_for_igvn(cmp);
1551     base_input++;
1552   }
1553 }
1554 
1555 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1556 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1557 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1558   PhaseIterGVN* igvn = &phase->igvn();
1559   // Search for allocations of this inline type. Ignore scalar replaceable ones; they
1560   // will be removed anyway, and changing the memory chain would confuse other optimizations.
1561   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1562     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1563     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1564       Node* res = alloc->result_cast();
1565       if (res == nullptr || !res->is_CheckCastPP()) {
1566         break; // No unique CheckCastPP
1567       }
1568       // Search for a dominating allocation of the same inline type
1569       Node* res_dom = res;
1570       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1571         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1572         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1573           Node* res_other = alloc_other->result_cast();
1574           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1575               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1576             res_dom = res_other;
1577           }
1578         }
1579       }
1580       if (res_dom != res) {
1581         // Replace allocation by dominating one.
1582         replace_allocation(igvn, res, res_dom);
1583         // The result of the dominated allocation is now unused and will be removed
1584         // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
1585         igvn->_worklist.push(alloc);
1586       }
1587     }
1588   }
1589 }
1590 
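     // Create a scalarized representation of the null value: a null oop, IsInit == 0, and zeroed field values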
1591 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1592   GrowableArray<ciType*> visited;
1593   visited.push(vk);
1594   return make_null_impl(gvn, vk, visited, transform);
1595 }
1596 
1597 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1598   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1599   vt->set_is_buffered(gvn);
1600   vt->set_is_init(gvn, gvn.intcon(0));
1601   for (uint i = 0; i < vt->field_count(); i++) {
1602     ciType* ft = vt->field_type(i);
1603     Node* value = gvn.zerocon(ft->basic_type());
1604     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1605       gvn.C->set_has_circular_inline_type(true);
1606     } else if (ft->is_inlinetype()) {
1607       int old_len = visited.length();
1608       visited.push(ft);
1609       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1610       visited.trunc_to(old_len);
1611     }
1612     vt->set_field_value(i, value);
1613   }
1614   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1615 }
1616 
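     // Clone this node unless it can safely be modified in place, i.e., 'safe_for_replace' is set and its only user
     // (if any) is the given safepoint map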
1617 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1618   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1619     return clone()->as_InlineType();
1620   }
1621   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1622     if (fast_out(i) != map) {
1623       return clone()->as_InlineType();
1624     }
1625   }
1626   gvn->hash_delete(this);
1627   return this;
1628 }
1629 
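     // Compute the type of this inline type node: refine it with the type of the oop input and mark it not-null
     // if the IsInit input is known to be 1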
1630 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1631   Node* oop = get_oop();
1632   const Type* toop = phase->type(oop);
1633 #ifdef ASSERT
1634   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1635     // We are not allocated (anymore) and should therefore not have an instance id
1636     dump(1);
1637     assert(false, "Unbuffered inline type should not have known instance id");
1638   }
1639 #endif
1640   const Type* t = toop->filter_speculative(_type);
1641   if (t->singleton()) {
1642     // Don't replace InlineType by a constant
1643     t = _type;
1644   }
1645   const Type* tinit = phase->type(in(IsInit));
1646   if (tinit == Type::TOP) {
1647     return Type::TOP;
1648   }
1649   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1650     t = t->join_speculative(TypePtr::NOTNULL);
1651   }
1652   return t;
1653 }