/*
 * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/phaseX.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, bool is_init) {
  InlineTypeNode* vt = clone()->as_InlineType();
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode for merging each field value
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during IGVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || !gvn->is_IterGVN() || field_is_flat(i);
    if (value->is_InlineType() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < req(); ++i) {
      Node* n = in(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flat inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset within the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flat(index)) {
      // Flat inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

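// Replaces the debug info use of this inline type in the safepoint 'sfpt' by a
// SafePointScalarObjectNode and adds the IsInit value and all field values as inputs.
// Inline type field values are pushed to 'worklist' so the caller can scalarize them as well.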
void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind, nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an IsInit field that needs
  // to be checked before using the field values.
  if (!igvn->type(get_is_init())->is_int()->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset_in_bytes();
    Node* value = field_value_by_offset(offset, true /* include flat inline type fields */);
    if (value->is_InlineType()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

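// Scalarizes this inline type in the debug info of all safepoints that use it
// (directly or through ConstraintCasts), unless the buffer oop can be used instead.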
void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = allow_oop && is_allocated(igvn) &&
                 (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->as_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->_worklist.push(this);
  }
}

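// Computes the address type for accessing the field at 'offset': raw for mismatched
// accesses, a per-field slice for flat inline type arrays, and the alias type of the
// declared field otherwise.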
const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = nullptr;
  bool is_array = ary_type != nullptr;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flat inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != nullptr, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe
// nodes of the same type but with different scalarization depth during GVN. This method adjusts
// the scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), field_is_null_free(i), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone()->as_InlineType();
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

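// 'visited' keeps track of the inline types loaded so far to detect circular field types
// and limit their scalarization.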
void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = make_default_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      value = make_from_flat_impl(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != nullptr);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != nullptr, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != nullptr, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), null_free, visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == nullptr) {
    holder = inline_klass();
  }
  holder_offset -= inline_klass()->first_field_offset();
  store(kit, base, ptr, holder, holder_offset, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      value->as_InlineType()->store_flat(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

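// Makes sure that this inline type is buffered, i.e., backed by a heap allocation. If no
// buffer exists yet, one is allocated and initialized with the field values. Returns a clone
// of this node with the oop and is_buffered inputs updated.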
InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone()->as_InlineType();
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  int bci = kit->bci();
  bool reexecute = kit->jvms()->should_reexecute();
  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");

    // Allocate and initialize buffer
    PreserveJVMState pjvms(kit);
    // Propagate re-execution state and bci
    kit->set_bci(bci);
    kit->jvms()->set_bci(bci);
    kit->jvms()->set_should_reexecute(reexecute);

    kit->kill_dead_locals();
    ciInlineKlass* vk = inline_klass();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
    store(kit, alloc_oop, alloc_oop, vk);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    assert(alloc != nullptr, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

    region->init_req(3, kit->control());
    oop   ->init_req(3, alloc_oop);
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone()->as_InlineType();
  vt->set_oop(res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

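// Returns true if this inline type is known to be buffered, that is, if the is_buffered
// input is the constant 1 or the oop is known to be non-null.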
bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C, bool null_free) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = nullptr;
    if (con == TypeFunc::Parms) {
      field = get_oop();
    } else if (!null_free && con == (call->tf()->range_cc()->cnt() - 1)) {
      field = get_is_init();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
      int extra = 0;
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset_in_bytes(), true);
    }
    if (field != nullptr) {
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}

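// Buffers all inline type fields of this node: flat fields are handled recursively,
// non-flat inline type fields are buffered individually. The resulting node replaces
// this one in the map.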
Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone()->as_InlineType();
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flat inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

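// Idealize: use the pre-allocated oop for default inline types, fold an oop input that is
// itself an InlineTypeNode, save the base oop if the field values were loaded from it, and
// (during IGVN) remove redundant re-allocations of this inline type.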
Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (!is_larval(phase) &&
      is_default(phase) &&
      inline_klass()->is_initialized() &&
      (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for default inline types
    set_oop(default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }
  if (!is_allocated(phase)) {
    // Save base oop if fields are loaded from memory and the inline
    // type is not buffered (in this case we should not use the oop).
    Node* base = is_loaded(phase);
    if (base != nullptr && !phase->type(base)->maybe_null()) {
      set_oop(base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  Node* oop = (vk->is_empty() && vk->is_initialized()) ? default_oop(gvn, vk) : gvn.zerocon(T_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, null_free);
  vt->set_is_buffered(gvn, vk->is_empty() && vk->is_initialized());
  vt->set_is_init(gvn);
  return vt;
}

Node* InlineTypeNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}

InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_default_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_default_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  // Create a new InlineTypeNode with default values
  Node* oop = vk->is_initialized() ? default_oop(gvn, vk) : gvn.zerocon(T_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
  vt->set_is_buffered(gvn, vk->is_initialized());
  vt->set_is_init(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_default_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_default(&gvn), "must be the default inline type");
  return vt;
}

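// Returns true if this node is known to represent the default instance: the IsInit input is
// the constant 1 and all field values are the zero/default value of their type.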
bool InlineTypeNode::is_default(PhaseGVN* gvn) const {
  const Type* tinit = gvn->type(in(IsInit));
  if (!tinit->isa_int() || !tinit->is_int()->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      if (!value->is_InlineType() || !value->as_InlineType()->is_default(gvn)) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      value = value->as_InlineType()->get_oop();
    }
    if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

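// Creates a scalarized InlineTypeNode from the given oop by loading all field values,
// null-checking nullable oops and keeping track of the oop to avoid re-buffering.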
InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, null_free, visited);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();

  if (vk->is_empty() && null_free) {
    InlineTypeNode* def = make_default_impl(gvn, vk, visited);
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    return oop->as_InlineType();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default_impl(gvn, vk, visited);
      } else {
        vt = make_null_impl(gvn, vk, visited);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, null_free);
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->load(kit, not_null_oop, not_null_oop, vk, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = nullptr;
      if (null_free) {
        null_vt = make_default_impl(gvn, vk, visited);
      } else {
        null_vt = make_null_impl(gvn, vk, visited);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);

      vt = vt->clone_with_phis(&gvn, region);
      vt->merge_with(&gvn, null_vt, 2, true);
      if (!null_free) {
        vt->set_oop(oop);
      }
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->load(kit, oop, oop, vk, visited);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn) || (null_free && !vk->is_initialized()), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, obj, ptr, holder, holder_offset, decorators, visited);
}

// Creates a scalarized InlineTypeNode by loading the field values from a flat inline type field or array element
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flat inline type field at 'holder_offset' or from an inline type array.
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  // The inline type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);
  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

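// Creates a scalarized InlineTypeNode from the multiple values produced or consumed by
// 'multi' (a method entry or a call), starting at 'base_input'.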
InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

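// Creates a copy of this inline type in larval state, i.e., backed by a buffer that is still
// under construction and may be updated, optionally allocating that buffer right away.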
InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(alloc_oop);
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

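// Finishes the larval state of this inline type: clears the larval bit in the buffer's mark
// word and makes sure the initializing stores cannot be reordered with the publishing store.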
InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(nullptr, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj);
  assert(alloc != nullptr, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

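// Returns true if this inline type is backed by a buffer allocation in larval state.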
bool InlineTypeNode::is_larval(PhaseGVN* gvn) const {
  if (!is_allocated(gvn)) {
    return false;
  }

  Node* oop = get_oop();
  AllocateNode* alloc = AllocateNode::Ideal_allocation(oop);
  return alloc != nullptr && alloc->_larval;
}

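// Checks if all field values were loaded from memory relative to the same base oop and, if
// so, returns that base. Returns nullptr if the inline type cannot be proven to be loaded.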
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == nullptr) {
    vk = inline_klass();
  }
  if (field_count() == 0 && vk->is_initialized()) {
    const Type* tinit = phase->type(in(IsInit));
    if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
      assert(is_allocated(phase), "must be allocated");
      return get_oop();
    } else {
      // TODO 8284443
      return nullptr;
    }
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flat(i)) {
        // Check inline type field load recursively
        base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == nullptr) {
          return nullptr;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
        return nullptr;
      } else if (base == nullptr) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
          return nullptr;
        }
      }
    } else {
      return nullptr;
    }
  }
  return base;
}

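// Returns the klass pointer of the inline type as a constant with the lowest bit set
// (a 'tagged' klass pointer).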
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

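// Passes the field values of this inline type to 'n' (a call or return), starting at
// 'base_input'. Flat fields are passed recursively, non-flat inline type fields are
// buffered first.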
void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
    } else {
      if (arg->is_InlineType()) {
        // Non-flat inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

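// Initializes the field values of this inline type from the parameters or projections of
// 'multi' (a method entry or a call), starting at 'base_input'. For nullable inline types,
// the IsInit input is set as well and subsequent loads are made dependent on its null check.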
void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = nullptr;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on it
    assert(null_check_region == nullptr, "already set");
    if (is_init == nullptr) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = nullptr;
    if (field_is_flat(i)) {
      // Flat inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flat inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != nullptr) {
          if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
            parm = parm->as_InlineType()->get_oop();
          }
          // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
        }
        if (visited.contains(type)) {
          kit->C->set_has_circular_inline_type(true);
        } else if (!parm->is_InlineType()) {
          int old_len = visited.length();
          visited.push(type);
          parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), field_is_null_free(i), visited);
          visited.trunc_to(old_len);
        }
      }
      base_input += type->size();
    }
    assert(parm != nullptr, "should never be null");
    assert(field_value(i) == nullptr, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == nullptr || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      assert((!is_default(igvn) || !inline_klass()->is_initialized()) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }
}

InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_null_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
  vt->set_is_buffered(gvn);
  vt->set_is_init(gvn, false);
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      value = make_null_impl(gvn, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  return gvn.transform(vt)->as_InlineType();
}

Node* InlineTypeNode::Identity(PhaseGVN* phase) {
  if (get_oop()->is_InlineType()) {
    return get_oop();
  }
  return this;
}

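// Computes the type of this inline type: filters the oop's type through the declared type
// (never constant folding the node itself) and joins with NOTNULL if IsInit is known to be 1.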
const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
  const Type* tinit = phase->type(in(IsInit));
  if (tinit == Type::TOP) {
    return Type::TOP;
  }
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}