/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_init) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create PhiNodes for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (value->is_InlineType() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = 0; i < field_count(); ++i) {
      Node* n = field_value(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flat inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset within the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flat(index)) {
      // Flat inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

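// Replaces this inline type in the debug info of 'sfpt' by a SafePointScalarObjectNode
// and appends the field values, so that deoptimization can re-materialize the buffer.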
void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  // Don't scalarize larvals in their own constructor call because the constructor will update them
  if (is_larval() && sfpt->is_CallJava() && sfpt->as_CallJava()->method() != nullptr && sfpt->as_CallJava()->method()->is_object_constructor() &&
      sfpt->as_CallJava()->method()->holder()->is_inlinetype() && sfpt->in(TypeFunc::Parms) == this) {
    assert(is_allocated(igvn), "receiver must be allocated");
    return;
  }

  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an IsInit field that needs
  // to be checked before using the field values.
  if (!igvn->type(get_is_init())->is_int()->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset_in_bytes();
    Node* value = field_value_by_offset(offset, true /* include flat inline type fields */);
    if (value->is_InlineType()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

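// Scalarizes this inline type in the debug info of all safepoint uses, following
// ConstraintCasts. If the buffer oop is known to be available cheaply, it is kept
// in the debug info instead to avoid keeping field loads live.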
void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  // TODO 8325106
  // TestBasicFunctionality::test3 fails without this. Add more tests?
  // Add proj nodes here? Recursive handling of phis required? We need a test that fails without.
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        // TestNullableArrays.test123 fails when enabling this, probably we should make sure that we don't load from a just allocated object
        //} else if (!(in->is_Con() || in->is_Parm() || in->is_Load() || (in->isa_DecodeN() && in->in(1)->is_Load()))) {
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->as_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->_worklist.push(this);
  }
}

const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = nullptr;
  bool is_array = ary_type != nullptr;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flat inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != nullptr, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), field_is_null_free(i), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = make_default_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      value = make_from_flat_impl(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != nullptr);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != nullptr, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != nullptr, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), null_free, visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == nullptr) {
    holder = inline_klass();
  }
  holder_offset -= inline_klass()->first_field_offset();
  store(kit, base, ptr, holder, holder_offset, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators, int offsetOnly) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    if (offsetOnly != -1 && offsetOnly != field_offset(i)) continue;
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      value->as_InlineType()->store_flat(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

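// Makes sure that this inline type is buffered, i.e. backed by a heap allocation.
// Emits runtime checks for the already-buffered and null cases and only allocates
// and initializes a new buffer on the remaining path.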
InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // TODO 8325106
  /*
  if (inline_klass()->is_initialized() && inline_klass()->is_empty()) {
    assert(false, "Should not buffer empty inline klass");
  }
  */

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  int bci = kit->bci();
  bool reexecute = kit->jvms()->should_reexecute();
  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");

    // Allocate and initialize buffer
    PreserveJVMState pjvms(kit);
    // Propagate re-execution state and bci
    kit->set_bci(bci);
    kit->jvms()->set_bci(bci);
    kit->jvms()->set_should_reexecute(reexecute);

    kit->kill_dead_locals();
    ciInlineKlass* vk = inline_klass();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
    // No need to initialize a larval buffer; we make sure that the oop cannot escape
    if (!is_larval()) {
      // Larval will be initialized later
      // TODO 8325106 should this use C2_TIGHTLY_COUPLED_ALLOC?
      store(kit, alloc_oop, alloc_oop, vk);

      // Do not let stores that initialize this buffer be reordered with a subsequent
      // store that would make this buffer accessible by other threads.
      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
      assert(alloc != nullptr, "must have an allocation node");
      kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    }

    region->init_req(3, kit->control());
    oop   ->init_req(3, alloc_oop);
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

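// Returns true if this inline type is known to be backed by a non-null buffer oop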
bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = nullptr;
    if (con == TypeFunc::Parms) {
      field = get_oop();
    } else if (con == (call->tf()->range_cc()->cnt() - 1)) {
      field = get_is_init();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
      int extra = 0;
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset_in_bytes(), true);
    }
    if (field != nullptr) {
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}

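// Buffers all inline type field values, handling flat fields recursively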
Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flat inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

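// Idealizations: use the pre-allocated default oop, fold an InlineType oop input,
// prefer a base oop that the field values were loaded from, and remove redundant
// re-allocations of an already buffered inline type.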
Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  const Type* tinit = phase->type(get_is_init());
  if (!is_larval(phase) && !is_larval() &&
      (tinit->isa_int() && tinit->is_int()->is_con(1)) &&
      (is_default(phase) || inline_klass()->is_empty()) &&
      inline_klass()->is_initialized() &&
      (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for null-free default or empty inline types
    set_oop(*phase, default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }
  // TODO 8325106 Re-evaluate this: We prefer a "loaded" oop because it's free. The existing oop might come from a buffering.
  if (!is_larval(phase) && !is_larval()) {
    // Save base oop if fields are loaded from memory and the inline
    // type is not buffered (in this case we should not use the oop).
    Node* base = is_loaded(phase);
    if (base != nullptr && get_oop() != base && !phase->type(base)->maybe_null()) {
      set_oop(*phase, base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  bool use_default_oop = vk->is_empty() && vk->is_initialized() && null_free;
  Node* oop = use_default_oop ? default_oop(gvn, vk) : gvn.zerocon(T_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, null_free);
  vt->set_is_buffered(gvn, use_default_oop);
  vt->set_is_init(gvn);
  return vt;
}

Node* InlineTypeNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}

InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_default_impl(gvn, vk, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_default_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
  // Create a new InlineTypeNode with default values
  Node* oop = vk->is_initialized() && !is_larval ? default_oop(gvn, vk) : gvn.zerocon(T_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
  vt->set_is_buffered(gvn, vk->is_initialized() && !is_larval);
  vt->set_is_init(gvn);
  vt->set_is_larval(is_larval);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_default_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_default(&gvn), "must be the default inline type");
  return vt;
}

bool InlineTypeNode::is_default(PhaseGVN* gvn) const {
  const Type* tinit = gvn->type(get_is_init());
  if (!tinit->isa_int() || !tinit->is_int()->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the default value
      if (!value->is_InlineType() || !value->as_InlineType()->is_default(gvn)) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      const Type* tinit = gvn->type(value->as_InlineType()->get_is_init());
      if (tinit->isa_int() && tinit->is_int()->is_con(0)) {
        continue;
      }
      return false;
    }
    if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, null_free, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free, GrowableArray<ciType*>& visited, bool is_larval) {
  PhaseGVN& gvn = kit->gvn();

  if (!is_larval && vk->is_empty() && null_free) {
    InlineTypeNode* def = make_default_impl(gvn, vk, visited);
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    // TODO 8325106 Re-enable assert and fix OSR code
    // Issue triggers with TestValueConstruction.java and -XX:Tier0BackedgeNotifyFreqLog=0 -XX:Tier2BackedgeNotifyFreqLog=0 -XX:Tier3BackedgeNotifyFreqLog=0 -XX:Tier2BackEdgeThreshold=1 -XX:Tier3BackEdgeThreshold=1 -XX:Tier4BackEdgeThreshold=1 -Xbatch -XX:-TieredCompilation
    // assert(!is_larval || oop->as_InlineType()->is_larval(), "must be larval");
    if (is_larval && !oop->as_InlineType()->is_larval()) {
      vt = oop->clone()->as_InlineType();
      vt->set_is_larval(true);
      return gvn.transform(vt)->as_InlineType();
    }
    return oop->as_InlineType();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default_impl(gvn, vk, visited);
      } else {
        vt = make_null_impl(gvn, vk, visited);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, null_free);
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->set_is_larval(is_larval);
    vt->load(kit, not_null_oop, not_null_oop, vk, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = nullptr;
      if (null_free) {
        null_vt = make_default_impl(gvn, vk, visited);
      } else {
        null_vt = make_null_impl(gvn, vk, visited);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      if (!null_free) {
        vt->set_oop(gvn, oop);
      }
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->set_is_larval(is_larval);
    vt->load(kit, oop, oop, vk, visited);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn) || (null_free && !vk->is_initialized()), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, obj, ptr, holder, holder_offset, decorators, visited);
}

// Implementation of 'make_from_flat' that keeps track of visited types to handle circular inline types
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flat inline type field at 'holder_offset' or from an inline type array.
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  // The inline type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);
  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(kit->gvn(), oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

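// Returns a larval (still under construction) copy of this inline type and
// optionally allocates a buffer for it that is marked as larval.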
InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(kit->gvn(), alloc_oop);
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

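// Clears the larval bit in the mark word of the buffer and returns a
// non-larval copy of this inline type.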
InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(nullptr, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj);
  assert(alloc != nullptr, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

bool InlineTypeNode::is_larval(PhaseGVN* gvn) const {
  if (!is_allocated(gvn)) {
    return false;
  }

  Node* oop = get_oop();
  AllocateNode* alloc = AllocateNode::Ideal_allocation(oop);
  return alloc != nullptr && alloc->_larval;
}

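// Checks if all field values were loaded from a single base oop with a matching
// layout and returns that base oop (or nullptr if there is none).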
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == nullptr) {
    vk = inline_klass();
  }
  if (field_count() == 0 && vk->is_initialized()) {
    const Type* tinit = phase->type(in(IsInit));
    // TODO 8325106
    if (false && !is_larval() && tinit->isa_int() && tinit->is_int()->is_con(1)) {
      assert(is_allocated(phase), "must be allocated");
      return get_oop();
    } else {
      // TODO 8284443
      return nullptr;
    }
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flat(i)) {
        // Check inline type field load recursively
        base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == nullptr) {
          return nullptr;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
        return nullptr;
      } else if (base == nullptr) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
          return nullptr;
        }
      }
    } else {
      return nullptr;
    }
  }
  return base;
}

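// Returns the klass pointer of 'vk' as a long constant with its lowest bit set (tagged)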
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

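// Passes the field values of this inline type as call arguments or return values,
// starting at index 'base_input' of node 'n'. Flat fields are passed recursively.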
void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
    } else {
      if (arg->is_InlineType()) {
        // Non-flat inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

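// Initializes the field values of this inline type from the incoming parameters or
// projections of 'multi' (a StartNode or CallNode), starting at 'base_input'.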
void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = nullptr;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent field loads dependent on the null check
    assert(null_check_region == nullptr, "already set");
    if (is_init == nullptr) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = nullptr;
    if (field_is_flat(i)) {
      // Flat inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flat inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != nullptr) {
          // We limit scalarization for inline types with circular fields and can therefore observe nodes
          // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
          // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
          if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
            parm = parm->as_InlineType()->get_oop();
          }
          // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
        }
        if (visited.contains(type)) {
          kit->C->set_has_circular_inline_type(true);
        } else if (!parm->is_InlineType()) {
          int old_len = visited.length();
          visited.push(type);
          parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), field_is_null_free(i), visited);
          visited.trunc_to(old_len);
        }
      }
      base_input += type->size();
    }
    assert(parm != nullptr, "should never be null");
    assert(field_value(i) == nullptr, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  // TODO 8332886 Really needed? GVN is disabled anyway.
  if (is_larval()) {
    return;
  }
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == nullptr || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      // TODO 8325106
      // assert((!is_default(igvn) || !inline_klass()->is_initialized()) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }
}

InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_null_impl(gvn, vk, visited, transform);
}

InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
  vt->set_is_buffered(gvn);
  vt->set_is_init(gvn, false);
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      value = make_null_impl(gvn, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  return transform ? gvn.transform(vt)->as_InlineType() : vt;
}

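// Clones this inline type if it is not safe to modify in place,
// i.e. if it has uses other than the given safepoint map.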
InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
  if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
    return clone()->as_InlineType();
  }
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    if (fast_out(i) != map) {
      return clone()->as_InlineType();
    }
  }
  gvn->hash_delete(this);
  return this;
}

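// Computes the type of this inline type from the oop input, never narrowing the
// node to a constant and adding non-nullness if IsInit is known to be set.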
const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
  const Type* tinit = phase->type(in(IsInit));
  if (tinit == Type::TOP) {
    return Type::TOP;
  }
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}