/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_init) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < field_count(); ++i) {
      Node* n = in(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

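// Returns the value of the field at the given index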
Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the null marker at the given offset.
Node* InlineTypeNode::null_marker_by_offset(int offset, int holder_offset) const {
  // Search through the null markers of all flat fields
  for (uint i = 0; i < field_count(); ++i) {
    if (field_is_flat(i)) {
      InlineTypeNode* value = field_value(i)->as_InlineType();
      if (!field_is_null_free(i)) {
        int nm_offset = holder_offset + field_null_marker_offset(i);
        if (nm_offset == offset) {
          return value->get_is_init();
        }
      }
      int flat_holder_offset = holder_offset + field_offset(i) - value->inline_klass()->payload_offset();
      Node* nm_value = value->null_marker_by_offset(offset, flat_holder_offset);
      if (nm_value != nullptr) {
        return nm_value;
      }
    }
  }
  return nullptr;
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive, bool search_null_marker) const {
  // First check if we are loading a null marker which is not a real field
  if (recursive && search_null_marker) {
    Node* value = null_marker_by_offset(offset);
    if (value != nullptr) {
      return value;
    }
  }

  // If the field at 'offset' belongs to a flat inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset in the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flat(index)) {
      // Flat inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->payload_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive, false);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

bool InlineTypeNode::field_is_volatile(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_volatile();
}

int InlineTypeNode::field_null_marker_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(field->is_flat(), "must be an inline type");
  return field->null_marker_offset();
}

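// Adds the field values of this inline type as debug info edges to the safepoint, recursing into
// flat fields. Null markers of nullable flat fields are collected in 'null_markers' (to be added
// by the caller) and non-flat inline type field values are pushed onto 'worklist' to be scalarized
// later. Returns the number of values added (including the collected null markers).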
uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, Node_List& null_markers, SafePointNode* sfpt) {
  uint cnt = 0;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      cnt += vt->add_fields_to_safepoint(worklist, null_markers, sfpt);
      if (!field_is_null_free(i)) {
        null_markers.push(vt->get_is_init());
        cnt++;
      }
      continue;
    }
    if (value->is_InlineType()) {
      // Add inline type to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
    cnt++;
  }
  return cnt;
}

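// Replaces this inline type in the debug info of the given safepoint by a
// SafePointScalarObjectNode describing its field values.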
void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  // We should not scalarize larvals in debug info of their constructor calls because their fields could still be
  // updated. If we scalarize and update the fields in the constructor, the updates won't be visible in the caller after
  // deoptimization because the scalarized field values are local to the caller. We need to use a buffer to make the
  // updates visible to the outside.
  if (is_larval() && sfpt->is_CallJava() && sfpt->as_CallJava()->method() != nullptr &&
      sfpt->as_CallJava()->method()->is_object_constructor() && bottom_type()->is_inlinetypeptr() &&
      sfpt->in(TypeFunc::Parms) == this) {
    // Receiver is always buffered because it's passed as oop, see special case in CompiledEntrySignature::compute_calling_conventions().
    assert(is_allocated(igvn), "receiver must be allocated");
    return;
  }

  JVMState* jvms = sfpt->jvms();
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());

  // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have an IsInit field that
  // needs to be checked before using the field values.
  const TypeInt* tinit = igvn->type(get_is_init())->isa_int();
  if (tinit != nullptr && !tinit->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  Node_List null_markers;
  uint nfields = add_fields_to_safepoint(worklist, null_markers, sfpt);
  // Add null markers after the field values
  for (uint i = 0; i < null_markers.size(); ++i) {
    sfpt->add_req(null_markers.at(i));
  }
  jvms->set_endoff(sfpt->req());
  // Replace safepoint edge by SafePointScalarObjectNode
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

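// Scalarizes this inline type in the debug info of all safepoints that use it. If the buffer oop
// is a constant, parameter, load or a phi of such nodes, it is used directly instead.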
void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

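// Returns the memory address type for accessing the field at the given offset: raw memory for
// mismatched accesses, the field's own slice for flat inline type arrays, or the alias type of
// the declared field otherwise.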
const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = nullptr;
  bool is_array = ary_type != nullptr;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flat inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != nullptr, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the all-zero instance.
      value = make_all_zero_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      bool needs_atomic_access = !null_free || field_is_volatile(i);
      assert(!needs_atomic_access, "Atomic access in non-atomic container");
      int nm_offset = field_is_null_free(i) ? -1 : (holder_offset + field_null_marker_offset(i));
      value = make_from_flat_impl(kit, ft->as_inline_klass(), base, ptr, nullptr, holder, offset, false, nm_offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != nullptr);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && oop_ptr->is_inlinetypeptr() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != nullptr, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != nullptr, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        if (null_free) {
          val_type = val_type->join_speculative(TypePtr::NOTNULL);
        }
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

// Get a field value from the payload by shifting it according to the offset
static Node* get_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, BasicType val_bt, int offset) {
  // Shift to the right position in the long value
  assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
  Node* value = nullptr;
  Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
  if (bt == T_LONG) {
    value = gvn->transform(new URShiftLNode(payload, shift_val));
    value = gvn->transform(new ConvL2INode(value));
  } else {
    value = gvn->transform(new URShiftINode(payload, shift_val));
  }

  if (val_bt == T_INT || val_bt == T_OBJECT || val_bt == T_ARRAY) {
    return value;
  } else {
    // Make sure to zero unused bits in the 32-bit value
    return Compile::narrow_value(val_bt, value, nullptr, gvn, true);
  }
}

// Convert a payload value to field values
void InlineTypeNode::convert_from_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset) {
  PhaseGVN* gvn = &kit->gvn();
  Node* value = nullptr;
  if (!null_free) {
    // Get the null marker
    value = get_payload_value(gvn, payload, bt, T_BOOLEAN, null_marker_offset);
    set_req(IsInit, value);
  }
  // Iterate over the fields and get their values from the payload
  for (uint i = 0; i < field_count(); ++i) {
    ciType* ft = field_type(i);
    bool field_null_free = field_is_null_free(i);
    int offset = holder_offset + field_offset(i) - inline_klass()->payload_offset();
    if (field_is_flat(i)) {
      null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
      InlineTypeNode* vt = make_uninitialized(*gvn, ft->as_inline_klass(), field_null_free);
      vt->convert_from_payload(kit, bt, payload, offset, field_null_free, null_marker_offset);
      value = gvn->transform(vt);
    } else {
      value = get_payload_value(gvn, payload, bt, ft->basic_type(), offset);
      if (!ft->is_primitive_type()) {
        // Narrow oop field
        assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
        const Type* val_type = Type::get_const_type(ft);
        if (field_null_free) {
          val_type = val_type->join_speculative(TypePtr::NOTNULL);
        }
        value = gvn->transform(new CastI2NNode(kit->control(), value));
        value = gvn->transform(new DecodeNNode(value, val_type->make_narrowoop()));
        // TODO 8350865 Should we add the membar to the CastI2N and give it a type?
        value = gvn->transform(new CastPPNode(kit->control(), value, val_type, ConstraintCastNode::UnconditionalDependency));
        // Prevent the CastI2N from floating below a safepoint
        kit->insert_mem_bar(Op_MemBarVolatile, value);

        if (ft->is_inlinetype()) {
          GrowableArray<ciType*> visited;
          value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        }
      }
    }
    set_field_value(i, value);
  }
}

// Set a field value in the payload by shifting it according to the offset
static Node* set_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, Node* value, BasicType val_bt, int offset) {
  assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");

  // Make sure to zero unused bits in the 32-bit value
  if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
    value = gvn->transform(new AndINode(value, gvn->intcon(0xFF)));
  } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
    value = gvn->transform(new AndINode(value, gvn->intcon(0xFFFF)));
  } else if (val_bt == T_FLOAT) {
    value = gvn->transform(new MoveF2INode(value));
  } else {
    assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
  }

  Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
  if (bt == T_LONG) {
    // Convert to long and remove the sign bit (the backend will fold this and emit a zero extend i2l)
    value = gvn->transform(new ConvI2LNode(value));
    value = gvn->transform(new AndLNode(value, gvn->longcon(0xFFFFFFFF)));

    Node* shift_value = gvn->transform(new LShiftLNode(value, shift_val));
    payload = new OrLNode(shift_value, payload);
  } else {
    Node* shift_value = gvn->transform(new LShiftINode(value, shift_val));
    payload = new OrINode(shift_value, payload);
  }
  return gvn->transform(payload);
}

// Convert the field values to a payload value of type 'bt'
Node* InlineTypeNode::convert_to_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset, int& oop_off_1, int& oop_off_2) const {
  PhaseGVN* gvn = &kit->gvn();
  Node* value = nullptr;
  if (!null_free) {
    // Set the null marker
    value = get_is_init();
    payload = set_payload_value(gvn, payload, bt, value, T_BOOLEAN, null_marker_offset);
  }
  // Iterate over the fields and add their values to the payload
  for (uint i = 0; i < field_count(); ++i) {
    value = field_value(i);
    int inner_offset = field_offset(i) - inline_klass()->payload_offset();
    int offset = holder_offset + inner_offset;
    if (field_is_flat(i)) {
      null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
      payload = value->as_InlineType()->convert_to_payload(kit, bt, payload, offset, field_is_null_free(i), null_marker_offset, oop_off_1, oop_off_2);
    } else {
      ciType* ft = field_type(i);
      BasicType field_bt = ft->basic_type();
      if (!ft->is_primitive_type()) {
        // Narrow oop field
        assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
        if (oop_off_1 == -1) {
          oop_off_1 = inner_offset;
        } else {
          assert(oop_off_2 == -1, "already set");
          oop_off_2 = inner_offset;
        }
        const Type* val_type = Type::get_const_type(ft)->make_narrowoop();
        value = gvn->transform(new EncodePNode(value, val_type));
        value = gvn->transform(new CastP2XNode(kit->control(), value));
        value = gvn->transform(new ConvL2INode(value));
        field_bt = T_INT;
      }
      payload = set_payload_value(gvn, payload, bt, value, field_bt, offset);
    }
  }
  return payload;
}

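// Stores this inline type in a flat layout at the given address. If 'atomic' is set, the field
// values (and the null marker for nullable types) are packed into a payload of at most 64 bits
// and written with a single atomic access, otherwise the fields are stored one by one.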
void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset, bool atomic, int null_marker_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  ciInlineKlass* vk = inline_klass();
  bool null_free = (null_marker_offset == -1);

  if (atomic) {
#ifdef ASSERT
    bool is_naturally_atomic = null_free && vk->nof_declared_nonstatic_fields() <= 1;
    assert(!is_naturally_atomic, "No atomic access required");
#endif
    // Convert to a payload value <= 64-bit and write atomically.
    // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
    // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
    // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
    // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
    BasicType bt = vk->atomic_size_to_basic_type(null_free);
    Node* payload = (bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
    int oop_off_1 = -1;
    int oop_off_2 = -1;
    payload = convert_to_payload(kit, bt, payload, 0, null_free, null_marker_offset - holder_offset, oop_off_1, oop_off_2);

    if (!UseG1GC || oop_off_1 == -1) {
      // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
      assert(oop_off_2 == -1 || !UseG1GC, "sanity");
      // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
      assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
      const Type* val_type = Type::get_const_basic_type(bt);
      decorators |= C2_MISMATCHED;

      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);

      if (!is_array) {
        Node* adr = kit->basic_plus_adr(base, ptr, holder_offset);
        kit->access_store_at(base, adr, TypeRawPtr::BOTTOM, payload, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, true, this);
      } else {
        assert(holder_offset == 0, "sanity");

        RegionNode* region = new RegionNode(3);
        kit->gvn().set_type(region, Type::CONTROL);
        kit->record_for_igvn(region);

        Node* bol = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
        IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

        Node* input_memory_state = kit->reset_memory();
        kit->set_all_memory(input_memory_state);

        Node* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
        kit->gvn().set_type(mem, Type::MEMORY);
        kit->record_for_igvn(mem);

        PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
        kit->gvn().set_type(io, Type::ABIO);
        kit->record_for_igvn(io);

        kit->set_control(kit->IfFalse(iff));
        region->init_req(1, kit->control());

        // Nullable
        if (!kit->stopped()) {
          assert(!null_free && vk->has_nullable_atomic_layout(), "Flat array can't be nullable");
          kit->access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, true, this);
          mem->init_req(1, kit->reset_memory());
          io->init_req(1, kit->i_o());
        }

        kit->set_control(kit->IfTrue(iff));

        // Null-free
        if (!kit->stopped()) {
          kit->set_all_memory(input_memory_state);

          // Check if it's atomic
          RegionNode* region_null_free = new RegionNode(3);
          kit->gvn().set_type(region_null_free, Type::CONTROL);
          kit->record_for_igvn(region_null_free);

          Node* mem_null_free = PhiNode::make(region_null_free, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
          kit->gvn().set_type(mem_null_free, Type::MEMORY);
          kit->record_for_igvn(mem_null_free);

          PhiNode* io_null_free = PhiNode::make(region_null_free, kit->i_o(), Type::ABIO);
          kit->gvn().set_type(io_null_free, Type::ABIO);
          kit->record_for_igvn(io_null_free);

          Node* bol = kit->null_free_atomic_array_test(base, vk);
          IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

          kit->set_control(kit->IfTrue(iff));
          region_null_free->init_req(1, kit->control());

          // Atomic
          if (!kit->stopped()) {
            BasicType bt_null_free = vk->atomic_size_to_basic_type(/* null_free */ true);
            const Type* val_type_null_free = Type::get_const_basic_type(bt_null_free);
            kit->set_all_memory(input_memory_state);

            if (bt == T_LONG && bt_null_free != T_LONG) {
              payload = kit->gvn().transform(new ConvL2INode(payload));
            }

            Node* cast = base;
            Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ true);
            kit->access_store_at(cast, adr, TypeRawPtr::BOTTOM, payload, val_type_null_free, bt_null_free, is_array ? (decorators | IS_ARRAY) : decorators, true, this);
            mem_null_free->init_req(1, kit->reset_memory());
            io_null_free->init_req(1, kit->i_o());
          }

          kit->set_control(kit->IfFalse(iff));
          region_null_free->init_req(2, kit->control());

          // Non-Atomic
          if (!kit->stopped()) {
            kit->set_all_memory(input_memory_state);

            Node* cast = base;
            Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ false);
            store(kit, cast, adr, holder, holder_offset - vk->payload_offset(), -1, decorators);

            mem_null_free->init_req(2, kit->reset_memory());
            io_null_free->init_req(2, kit->i_o());
          }

          mem->init_req(2, kit->gvn().transform(mem_null_free));
          io->init_req(2, kit->gvn().transform(io_null_free));
          region->init_req(2, kit->gvn().transform(region_null_free));
        }

        kit->set_control(kit->gvn().transform(region));
        kit->set_all_memory(kit->gvn().transform(mem));
        kit->set_i_o(kit->gvn().transform(io));
      }
    } else {
      // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
      assert(UseG1GC, "Unexpected GC");
      assert(bt == T_LONG, "Unexpected payload type");
      const TypePtr* adr_type = TypeRawPtr::BOTTOM;
      Node* mem = kit->memory(adr_type);
      // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
      Node* oop_offset = (oop_off_2 == -1) ? kit->intcon(oop_off_1) : nullptr;
      Node* adr = kit->basic_plus_adr(base, ptr, holder_offset);
      Node* st = kit->gvn().transform(new StoreLSpecialNode(kit->control(), mem, adr, adr_type, payload, oop_offset, MemNode::unordered));
      kit->set_memory(st, adr_type);
    }
    // Prevent loads from floating above this mismatched store
    kit->insert_mem_bar(Op_MemBarCPUOrder);
    return;
  }
  assert(null_free, "Nullable flat implies atomic");

  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == nullptr) {
    holder = vk;
  }
  holder_offset -= vk->payload_offset();
  store(kit, base, ptr, holder, holder_offset, -1, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, int offsetOnly, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    if (offsetOnly != -1 && offsetOnly != field_offset(i)) continue;
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      bool needs_atomic_access = !field_is_null_free(i) || field_is_volatile(i);
      assert(!needs_atomic_access, "Atomic access in non-atomic container");
      int nm_offset = field_is_null_free(i) ? -1 : (holder_offset + field_null_marker_offset(i));
      value->as_InlineType()->store_flat(kit, base, ptr, nullptr, holder, offset, false, nm_offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

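// Makes sure this inline type is buffered on the heap: if it is not already buffered (and not
// null), a new instance is allocated (and initialized if 'must_init' is set). Returns an inline
// type node with the oop and is_buffered inputs updated.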
InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace, bool must_init) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    // Allocate and initialize buffer, re-execute on deoptimization.
    kit->jvms()->set_bci(kit->bci());
    kit->jvms()->set_should_reexecute(true);
    kit->kill_dead_locals();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);

    if (must_init) {
      // Either not a larval or a larval receiver on which we are about to invoke an abstract value class constructor
      // or the Object constructor which is not inlined. It is therefore escaping, and we must initialize the buffer
      // because we have not done this, yet, for larvals (see else case).
      store(kit, alloc_oop, alloc_oop, vk);

      // Do not let stores that initialize this buffer be reordered with a subsequent
      // store that would make this buffer accessible by other threads.
      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
      assert(alloc != nullptr, "must have an allocation node");
      kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    } else {
      // We do not need to initialize the buffer because a larval could still be updated which will create a new buffer.
      // Once the larval escapes, we will initialize the buffer (must_init set).
      assert(is_larval(), "only larvals can possibly skip the initialization of their buffer");
    }
    oop->init_req(3, alloc_oop);
    region->init_req(3, kit->control());
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

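// Returns true if this inline type is known to be buffered, i.e. the is_buffered input is the
// constant 1 or the oop is known to be non-null.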
bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

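// Replaces the result projection of 'call' at index 'proj_idx' (if it exists) by 'value' and
// advances the index by the size of 'bt'.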
static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
  ProjNode* pn = call->proj_out_or_null(proj_idx);
  if (pn != nullptr) {
    C->gvn_replace_by(pn, value);
    C->initial_gvn()->hash_delete(pn);
    pn->set_req(0, C->top());
  }
  proj_idx += type2size[bt];
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  uint proj_idx = TypeFunc::Parms;
  // Replace oop projection
  replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
  // Replace field projections
  replace_field_projs(C, call, proj_idx);
  // Replace is_init projection
  replace_proj(C, call, proj_idx, get_is_init(), T_BOOLEAN);
  assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
}

void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      // Replace field projections for flat field
      vt->replace_field_projs(C, call, proj_idx);
      if (!field_is_null_free(i)) {
        // Replace is_init projection for nullable field
        replace_proj(C, call, proj_idx, vt->get_is_init(), T_BOOLEAN);
      }
      continue;
    }
    // Replace projection for field value
    replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
  }
}

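// Makes sure all non-flat inline type field values are buffered, recursing into flat fields.
// Returns the transformed inline type node and updates the map.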
Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
     Node* value = field_value(i);
     if (field_is_flat(i)) {
       // Flat inline type field
       vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
     } else if (value->is_InlineType()) {
       // Non-flat inline type field
       vt->set_field_value(i, value->as_InlineType()->buffer(kit));
     }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

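// Canonicalizes this inline type node: takes over the inputs of a known non-null InlineType oop,
// uses the base oop if the field values are loaded from it, and removes redundant re-allocations
// of an already buffered inline type.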
Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use base oop if fields are loaded from memory
  Node* base = is_loaded(phase);
  if (base != nullptr && get_oop() != base && !phase->type(base)->maybe_null()) {
    set_oop(*phase, base);
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
  vt->set_is_buffered(gvn, false);
  vt->set_is_init(gvn);
  return vt;
}

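// Creates an inline type node representing the all-zero (default) instance of the given inline klass.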
InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_all_zero_impl(gvn, vk, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
  // Create a new InlineTypeNode initialized with all zero
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
  vt->set_is_buffered(gvn, false);
  vt->set_is_init(gvn);
  vt->set_is_larval(is_larval);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_all_zero_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
  return vt;
}

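// Returns true if this inline type represents the all-zero (default) instance of its inline klass:
// null-free fields are recursively all-zero, nullable inline type fields are null and all other
// fields are zero (with 'flat' set, null-free fields must also be flat).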
bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
  const TypeInt* tinit = gvn->type(get_is_init())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the all-zero value. If 'flat' is set,
      // reject non-flat fields because they need to be initialized with an oop to a buffer.
      if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_is_init())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    } else if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

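// Creates an inline type node from a heap-allocated oop, loading the field values from memory.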
1208 InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool is_larval) {
1209   GrowableArray<ciType*> visited;
1210   visited.push(vk);
1211   return make_from_oop_impl(kit, oop, vk, visited, is_larval);
1212 }
1213 
1214 InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
1215   PhaseGVN& gvn = kit->gvn();
1216 
1217   // Create and initialize an InlineTypeNode by loading all field
1218   // values from a heap-allocated version and also save the oop.
1219   InlineTypeNode* vt = nullptr;
1220 
1221   if (oop->isa_InlineType()) {
1222     // TODO 8335256 Re-enable assert and fix OSR code
1223     // Issue triggers with TestValueConstruction.java and -XX:Tier0BackedgeNotifyFreqLog=0 -XX:Tier2BackedgeNotifyFreqLog=0 -XX:Tier3BackedgeNotifyFreqLog=0 -XX:Tier2BackEdgeThreshold=1 -XX:Tier3BackEdgeThreshold=1 -XX:Tier4BackEdgeThreshold=1 -Xbatch -XX:-TieredCompilation
1224     // assert(!is_larval || oop->as_InlineType()->is_larval(), "must be larval");
1225     if (is_larval && !oop->as_InlineType()->is_larval()) {
1226       vt = oop->clone()->as_InlineType();
1227       vt->set_is_larval(true);
1228       return gvn.transform(vt)->as_InlineType();
1229     }
1230     return oop->as_InlineType();
1231   } else if (gvn.type(oop)->maybe_null()) {
1232     // Add a null check because the oop may be null
1233     Node* null_ctl = kit->top();
1234     Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
1235     if (kit->stopped()) {
1236       // Constant null
1237       kit->set_control(null_ctl);
1238       vt = make_null_impl(gvn, vk, visited);
1239       kit->record_for_igvn(vt);
1240       return vt;
1241     }
1242     vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
1243     vt->set_is_buffered(gvn);
1244     vt->set_is_init(gvn);
1245     vt->set_is_larval(is_larval);
1246     vt->load(kit, not_null_oop, not_null_oop, vk, visited);
1247 
1248     if (null_ctl != kit->top()) {
1249       InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
1250       Node* region = new RegionNode(3);
1251       region->init_req(1, kit->control());
1252       region->init_req(2, null_ctl);
1253       vt = vt->clone_with_phis(&gvn, region, kit->map());
1254       vt->merge_with(&gvn, null_vt, 2, true);
1255       vt->set_oop(gvn, oop);
1256       kit->set_control(gvn.transform(region));
1257     }
1258   } else {
1259     // Oop can never be null
1260     vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
1261     Node* init_ctl = kit->control();
1262     vt->set_is_buffered(gvn);
1263     vt->set_is_init(gvn);
1264     vt->set_is_larval(is_larval);
1265     vt->load(kit, oop, oop, vk, visited);
1266 // TODO 8284443
1267 //    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
1268 //           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
1269   }
1270   assert(vt->is_allocated(&gvn), "inline type should be allocated");
1271   kit->record_for_igvn(vt);
1272   return gvn.transform(vt)->as_InlineType();
1273 }
1274 
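     // Creates an InlineTypeNode by loading its field values from a flat field of 'holder' at
     // 'holder_offset' or from a flat array element ('obj' is the holder oop, 'ptr' the payload address).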
1275 InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset,
1276                                                bool atomic, int null_marker_offset, DecoratorSet decorators) {
1277   GrowableArray<ciType*> visited;
1278   visited.push(vk);
1279   return make_from_flat_impl(kit, vk, obj, ptr, idx, holder, holder_offset, atomic, null_marker_offset, decorators, visited);
1280 }
1281 
1282 // Implementation of 'make_from_flat'. The 'visited' list is used to handle value classes with circular fields.
1283 InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset,
1284                                                     bool atomic, int null_marker_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
1285   if (kit->gvn().type(obj)->isa_aryptr()) {
1286     kit->C->set_flat_accesses();
1287   }
1288   // Create and initialize an InlineTypeNode by loading all field values from
1289   // a flat inline type field at 'holder_offset' or from an inline type array.
1290   bool null_free = (null_marker_offset == -1);
1291   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1292 
1293   if (atomic) {
1294     // Read atomically and convert from payload
1295 #ifdef ASSERT
1296     bool is_naturally_atomic = null_free && vk->nof_declared_nonstatic_fields() <= 1;
1297     assert(!is_naturally_atomic, "No atomic access required");
1298 #endif
1299     BasicType bt = vk->atomic_size_to_basic_type(null_free);
1300     decorators |= C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD;
1301     const Type* val_type = Type::get_const_basic_type(bt);
1302 
1303     bool is_array = (kit->gvn().type(obj)->isa_aryptr() != nullptr);
1304     Node* payload = nullptr;
1305     if (!is_array) {
1306       Node* adr = kit->basic_plus_adr(obj, ptr, holder_offset);
1307       payload = kit->access_load_at(obj, adr, TypeRawPtr::BOTTOM, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
1308     } else {
1309       assert(holder_offset == 0, "sanity");
1310 
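           // The layout of a flat array element (nullable, null-free atomic, or null-free non-atomic)
           // is not known statically here. Emit runtime layout checks and merge the payloads loaded
           // on the different paths with Phis.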
1311       RegionNode* region = new RegionNode(3);
1312       kit->gvn().set_type(region, Type::CONTROL);
1313       kit->record_for_igvn(region);
1314 
1315       payload = PhiNode::make(region, nullptr, val_type);
1316       kit->gvn().set_type(payload, val_type);
1317       kit->record_for_igvn(payload);
1318 
1319       Node* bol = kit->null_free_array_test(obj); // Argument evaluation order is unspecified in C++ and since this sets control, it needs to come first
1320       IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);
1321 
1322       kit->set_control(kit->IfFalse(iff));
1323       region->init_req(1, kit->control());
1324 
1325       // Nullable
1326       if (!kit->stopped()) {
1327         assert(!null_free && vk->has_nullable_atomic_layout(), "Flat array can't be nullable");
1328         Node* load = kit->access_load_at(obj, ptr, TypeRawPtr::BOTTOM, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
1329         payload->init_req(1, load);
1330       }
1331 
1332       kit->set_control(kit->IfTrue(iff));
1333 
1334       // Null-free
1335       if (!kit->stopped()) {
1336         // Check if it's atomic
1337         RegionNode* region_null_free = new RegionNode(3);
1338         kit->gvn().set_type(region_null_free, Type::CONTROL);
1339         kit->record_for_igvn(region_null_free);
1340 
1341         Node* payload_null_free = PhiNode::make(region_null_free, nullptr, val_type);
1342         kit->gvn().set_type(payload_null_free, val_type);
1343         kit->record_for_igvn(payload_null_free);
1344 
1345         bol = kit->null_free_atomic_array_test(obj, vk);
1346         IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);
1347 
1348         kit->set_control(kit->IfTrue(iff));
1349         region_null_free->init_req(1, kit->control());
1350 
1351         // Atomic
1352         if (!kit->stopped()) {
1353           BasicType bt_null_free = vk->atomic_size_to_basic_type(/* null_free */ true);
1354           const Type* val_type_null_free = Type::get_const_basic_type(bt_null_free);
1355 
1356           Node* cast = obj;
1357           Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ true);
1358           Node* load = kit->access_load_at(cast, adr, TypeRawPtr::BOTTOM, val_type_null_free, bt_null_free, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
1359           if (bt == T_LONG && bt_null_free != T_LONG) {
1360             load = kit->gvn().transform(new ConvI2LNode(load));
1361           }
1362           // Set the null marker if not known to be null-free
1363           if (!null_free) {
1364             load = set_payload_value(&kit->gvn(), load, bt, kit->intcon(1), T_BOOLEAN, null_marker_offset);
1365           }
1366           payload_null_free->init_req(1, load);
1367         }
1368 
1369         kit->set_control(kit->IfFalse(iff));
1370         region_null_free->init_req(2, kit->control());
1371 
1372         // Non-Atomic
1373         if (!kit->stopped()) {
1374           // TODO 8350865 Is the conversion to/from payload folded? We should wire this directly
1375 
1376           InlineTypeNode* vt_atomic = make_uninitialized(kit->gvn(), vk, true);
1377           Node* cast = obj;
1378           Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ false);
1379           vt_atomic->load(kit, cast, adr, holder, visited, holder_offset - vk->payload_offset(), decorators);
1380 
1381           Node* tmp_payload = (bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
1382           int oop_off_1 = -1;
1383           int oop_off_2 = -1;
1384           tmp_payload = vt_atomic->convert_to_payload(kit, bt, tmp_payload, 0, null_free, null_marker_offset, oop_off_1, oop_off_2);
1385 
1386           payload_null_free->init_req(2, tmp_payload);
1387         }
1388 
1389         region->init_req(2, kit->gvn().transform(region_null_free));
1390         payload->init_req(2, kit->gvn().transform(payload_null_free));
1391       }
1392 
1393       kit->set_control(kit->gvn().transform(region));
1394     }
1395 
1396     vt->convert_from_payload(kit, bt, kit->gvn().transform(payload), 0, null_free, null_marker_offset - holder_offset);
1397     return kit->gvn().transform(vt)->as_InlineType();
1398   }
1399   assert(null_free, "Nullable flat implies atomic");
1400 
1401   // The inline type is flattened into the object without an oop header. Subtract the
1402   // offset of the first field to account for the missing header when loading the values.
1403   holder_offset -= vk->payload_offset();
1404   vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);
1405   assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
1406   return kit->gvn().transform(vt)->as_InlineType();
1407 }
1408 
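     // Creates an InlineTypeNode from the scalarized arguments (in == true) or results (in == false)
     // of 'multi' (a Start or call node), starting at edge index 'base_input'.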
1409 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1410   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1411   if (!in) {
1412     // Keep track of the oop. The returned inline type might already be buffered.
1413     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1414     vt->set_oop(kit->gvn(), oop);
1415   }
1416   GrowableArray<ciType*> visited;
1417   visited.push(vk);
1418   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1419   return kit->gvn().transform(vt)->as_InlineType();
1420 }
1421 
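     // Returns a larval (still mutable) copy of this inline type. If 'allocate' is true, the value
     // is also buffered into a newly allocated instance that is marked as larval.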
1422 InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
1423   ciInlineKlass* vk = inline_klass();
1424   InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
1425   for (uint i = 1; i < req(); ++i) {
1426     res->set_req(i, in(i));
1427   }
1428 
1429   if (allocate) {
1430     // Re-execute if buffering triggers deoptimization
1431     PreserveReexecuteState preexecs(kit);
1432     kit->jvms()->set_should_reexecute(true);
1433     Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
1434     Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, true);
1435     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
1436     alloc->_larval = true;
1437 
1438     store(kit, alloc_oop, alloc_oop, vk);
1439     res->set_oop(kit->gvn(), alloc_oop);
1440   }
1441   // TODO 8239003
1442   //res->set_type(TypeInlineType::make(vk, true));
1443   res = kit->gvn().transform(res)->as_InlineType();
1444   assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
1445   return res;
1446 }
1447 
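     // Finishes the larval phase of this buffered inline type: clears the larval bit in the buffer's
     // mark word, adds a StoreStore barrier so the initializing stores cannot be reordered with the
     // publishing store, and returns a non-larval copy of this node.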
1448 InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
1449   Node* obj = get_oop();
1450   Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
1451   Node* mark = kit->make_load(nullptr, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1452   mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
1453   kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), MemNode::unordered);
1454 
1455   // Do not let stores that initialize this buffer be reordered with a subsequent
1456   // store that would make this buffer accessible by other threads.
1457   AllocateNode* alloc = AllocateNode::Ideal_allocation(obj);
1458   assert(alloc != nullptr, "must have an allocation node");
1459   kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1460 
1461   ciInlineKlass* vk = inline_klass();
1462   InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
1463   for (uint i = 1; i < req(); ++i) {
1464     res->set_req(i, in(i));
1465   }
1466   // TODO 8239003
1467   //res->set_type(TypeInlineType::make(vk, false));
1468   res = kit->gvn().transform(res)->as_InlineType();
1469   return res;
1470 }
1471 
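     // Returns true if this inline type is buffered and its allocation is still marked as larval.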
1472 bool InlineTypeNode::is_larval(PhaseGVN* gvn) const {
1473   if (!is_allocated(gvn)) {
1474     return false;
1475   }
1476 
1477   Node* oop = get_oop();
1478   AllocateNode* alloc = AllocateNode::Ideal_allocation(oop);
1479   return alloc != nullptr && alloc->_larval;
1480 }
1481 
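     // Checks if all field values of this inline type were loaded from the same base oop with offsets
     // matching the layout of value class 'vk'. Returns that base oop, or nullptr if the values are
     // not (all) loaded from a single buffer.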
1482 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
1483   if (is_larval() || is_larval(phase)) {
1484     return nullptr;
1485   }
1486   if (vk == nullptr) {
1487     vk = inline_klass();
1488   }
1489   for (uint i = 0; i < field_count(); ++i) {
1490     int offset = holder_offset + field_offset(i);
1491     Node* value = field_value(i);
1492     if (value->is_InlineType()) {
1493       InlineTypeNode* vt = value->as_InlineType();
1494       if (vt->type()->inline_klass()->is_empty()) {
1495         continue;
1496       } else if (field_is_flat(i)) {
1497         // Check inline type field load recursively
1498         base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1499         if (base == nullptr) {
1500           return nullptr;
1501         }
1502         continue;
1503       } else {
1504         value = vt->get_oop();
1505         if (value->Opcode() == Op_CastPP) {
1506           // Skip CastPP
1507           value = value->in(1);
1508         }
1509       }
1510     }
1511     if (value->isa_DecodeN()) {
1512       // Skip DecodeN
1513       value = value->in(1);
1514     }
1515     if (value->isa_Load()) {
1516       // Check if base and offset of field load matches inline type layout
1517       intptr_t loffset = 0;
1518       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1519       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1520         return nullptr;
1521       } else if (base == nullptr) {
1522         // Set base and check if pointer type matches
1523         base = lbase;
1524         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1525         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1526           return nullptr;
1527         }
1528       }
1529     } else {
1530       return nullptr;
1531     }
1532   }
1533   return base;
1534 }
1535 
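     // Returns the klass pointer of 'vk' as a jlong constant with the least significant bit set
     // (a "tagged" klass pointer).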
1536 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1537   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1538   intptr_t bits = tk->get_con();
1539   set_nth_bit(bits, 0);
1540   return gvn.longcon((jlong)bits);
1541 }
1542 
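     // Passes the field values of this inline type as arguments (in == true) or return values
     // (in == false) of node 'n', starting at edge index 'base_input'. Flat fields are passed
     // recursively; other inline type fields are buffered and passed as oops.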
1543 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1544   if (!null_free && in) {
1545     n->init_req(base_input++, get_is_init());
1546   }
1547   for (uint i = 0; i < field_count(); i++) {
1548     Node* arg = field_value(i);
1549     if (field_is_flat(i)) {
1550       // Flat inline type field
1551       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1552       if (!field_is_null_free(i)) {
1553         assert(field_null_marker_offset(i) != -1, "inconsistency");
1554         n->init_req(base_input++, arg->as_InlineType()->get_is_init());
1555       }
1556     } else {
1557       if (arg->is_InlineType()) {
1558         // Non-flat inline type field
1559         InlineTypeNode* vt = arg->as_InlineType();
1560         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1561         arg = vt->buffer(kit);
1562       }
1563       // Initialize call/return arguments
1564       n->init_req(base_input++, arg);
1565       if (field_type(i)->size() == 2) {
1566         n->init_req(base_input++, kit->top());
1567       }
1568     }
1569   }
1570   // The last argument is used to pass IsInit information to compiled code and is not required here.
1571   if (!null_free && !in) {
1572     n->init_req(base_input++, kit->top());
1573   }
1574 }
1575 
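     // Initializes the field values of this inline type from the incoming arguments (in == true) or
     // the return projections (in == false) of 'multi', starting at edge index 'base_input'. For a
     // nullable inline type, a null check on the IsInit value guards the field values.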
1576 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
1577   PhaseGVN& gvn = kit->gvn();
1578   Node* is_init = nullptr;
1579   if (!null_free) {
1580     // Nullable inline type
1581     if (in) {
1582       // Set IsInit field
1583       if (multi->is_Start()) {
1584         is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1585       } else {
1586         is_init = multi->as_Call()->in(base_input);
1587       }
1588       set_req(IsInit, is_init);
1589       base_input++;
1590     }
1591     // Add a null check to make subsequent loads dependent on it
1592     assert(null_check_region == nullptr, "already set");
1593     if (is_init == nullptr) {
1594       // Will only be initialized below, use dummy node for now
1595       is_init = new Node(1);
1596       is_init->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1597       gvn.set_type_bottom(is_init);
1598     }
1599     Node* null_ctrl = kit->top();
1600     kit->null_check_common(is_init, T_INT, false, &null_ctrl);
1601     Node* non_null_ctrl = kit->control();
1602     null_check_region = new RegionNode(3);
1603     null_check_region->init_req(1, non_null_ctrl);
1604     null_check_region->init_req(2, null_ctrl);
1605     null_check_region = gvn.transform(null_check_region);
1606     kit->set_control(null_check_region);
1607   }
1608 
1609   for (uint i = 0; i < field_count(); ++i) {
1610     ciType* type = field_type(i);
1611     Node* parm = nullptr;
1612     if (field_is_flat(i)) {
1613       // Flat inline type field
1614       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
1615       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1616       if (!field_is_null_free(i)) {
1617         assert(field_null_marker_offset(i) != -1, "inconsistency");
1618         Node* is_init = nullptr;
1619         if (multi->is_Start()) {
1620           is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1621         } else if (in) {
1622           is_init = multi->as_Call()->in(base_input);
1623         } else {
1624           is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1625         }
1626         vt->set_req(IsInit, is_init);
1627         base_input++;
1628       }
1629       parm = gvn.transform(vt);
1630     } else {
1631       if (multi->is_Start()) {
1632         assert(in, "return from start?");
1633         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1634       } else if (in) {
1635         parm = multi->as_Call()->in(base_input);
1636       } else {
1637         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1638       }
1639       bool null_free = field_is_null_free(i);
1640       // Non-flat inline type field
1641       if (type->is_inlinetype()) {
1642         if (null_check_region != nullptr) {
1643           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1644           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1645           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1646           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1647             parm = parm->as_InlineType()->get_oop();
1648           }
1649           // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
1650           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1651           parm->set_req(2, kit->zerocon(T_OBJECT));
1652           parm = gvn.transform(parm);
1653           null_free = false;
1654         }
1655         if (visited.contains(type)) {
1656           kit->C->set_has_circular_inline_type(true);
1657         } else if (!parm->is_InlineType()) {
1658           int old_len = visited.length();
1659           visited.push(type);
1660           if (null_free) {
1661             parm = kit->cast_not_null(parm);
1662           }
1663           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1664           visited.trunc_to(old_len);
1665         }
1666       }
1667       base_input += type->size();
1668     }
1669     assert(parm != nullptr, "should never be null");
1670     assert(field_value(i) == nullptr, "already set");
1671     set_field_value(i, parm);
1672     gvn.record_for_igvn(parm);
1673   }
1674   // The last argument is used to pass IsInit information to compiled code
1675   if (!null_free && !in) {
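         // 'is_init' still refers to the dummy node created above; rewire the null check to the
         // real IsInit projection of the call.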
1676     Node* cmp = is_init->raw_out(0);
1677     is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1678     set_req(IsInit, is_init);
1679     gvn.hash_delete(cmp);
1680     cmp->set_req(1, is_init);
1681     gvn.hash_find_insert(cmp);
1682     gvn.record_for_igvn(cmp);
1683     base_input++;
1684   }
1685 }
1686 
1687 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1688 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1689 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1690   // TODO 8332886 Really needed? GVN is disabled anyway.
1691   if (is_larval()) {
1692     return;
1693   }
1694   PhaseIterGVN* igvn = &phase->igvn();
1695   // Search for allocations of this inline type. Ignore scalar-replaceable ones; they
1696   // will be removed anyway, and changing the memory chain would only confuse other optimizations.
1697   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1698     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1699     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1700       Node* res = alloc->result_cast();
1701       if (res == nullptr || !res->is_CheckCastPP()) {
1702         break; // No unique CheckCastPP
1703       }
1704       // Search for a dominating allocation of the same inline type
1705       Node* res_dom = res;
1706       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1707         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1708         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1709           Node* res_other = alloc_other->result_cast();
1710           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1711               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1712             res_dom = res_other;
1713           }
1714         }
1715       }
1716       if (res_dom != res) {
1717         // Replace allocation by dominating one.
1718         replace_allocation(igvn, res, res_dom);
1719         // The result of the dominated allocation is now unused and will be removed
1720         // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
1721         igvn->_worklist.push(alloc);
1722       }
1723     }
1724   }
1725 }
1726 
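     // Creates an InlineTypeNode representing null for the nullable value class 'vk': the oop is
     // nullptr, IsInit is 0, and field values are zero or recursively null.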
1727 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1728   GrowableArray<ciType*> visited;
1729   visited.push(vk);
1730   return make_null_impl(gvn, vk, visited, transform);
1731 }
1732 
1733 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1734   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1735   vt->set_is_buffered(gvn);
1736   vt->set_is_init(gvn, false);
1737   for (uint i = 0; i < vt->field_count(); i++) {
1738     ciType* ft = vt->field_type(i);
1739     Node* value = gvn.zerocon(ft->basic_type());
1740     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1741       gvn.C->set_has_circular_inline_type(true);
1742     } else if (ft->is_inlinetype()) {
1743       int old_len = visited.length();
1744       visited.push(ft);
1745       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1746       visited.trunc_to(old_len);
1747     }
1748     vt->set_field_value(i, value);
1749   }
1750   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1751 }
1752 
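     // Returns this node, removed from the GVN hash table so it can be modified in place, if
     // modification is safe and its only use (if any) is 'map'. Otherwise returns a clone.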
1753 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1754   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1755     return clone()->as_InlineType();
1756   }
1757   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1758     if (fast_out(i) != map) {
1759       return clone()->as_InlineType();
1760     }
1761   }
1762   gvn->hash_delete(this);
1763   return this;
1764 }
1765 
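     // Computes the type of this inline type node from its oop input. The result is never a constant
     // (see comment below) and is joined with NOTNULL if the IsInit input is a constant 1.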
1766 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1767   Node* oop = get_oop();
1768   const Type* toop = phase->type(oop);
1769 #ifdef ASSERT
1770   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1771     // We are not allocated (anymore) and should therefore not have an instance id
1772     dump(1);
1773     assert(false, "Unbuffered inline type should not have known instance id");
1774   }
1775 #endif
1776   const Type* t = toop->filter_speculative(_type);
1777   if (t->singleton()) {
1778     // Don't replace InlineType by a constant
1779     t = _type;
1780   }
1781   const Type* tinit = phase->type(in(IsInit));
1782   if (tinit == Type::TOP) {
1783     return Type::TOP;
1784   }
1785   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1786     t = t->join_speculative(TypePtr::NOTNULL);
1787   }
1788   return t;
1789 }
1790 
1791 #ifndef PRODUCT
1792 void InlineTypeNode::dump_spec(outputStream* st) const {
1793   if (_is_larval) {
1794     st->print(" #larval");
1795   }
1796 }
1797 #endif // NOT PRODUCT