/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/phaseX.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_init) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < req(); ++i) {
      Node* n = in(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flat inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset in the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flat(index)) {
      // Flat inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

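// Returns the index of the declared non-static field with the given offset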
uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  // We should not scalarize larvals in debug info of their constructor calls because their fields could still be
  // updated. If we scalarize and update the fields in the constructor, the updates won't be visible in the caller after
  // deoptimization because the scalarized field values are local to the caller. We need to use a buffer to make the
  // updates visible to the outside.
  if (is_larval() && sfpt->is_CallJava() && sfpt->as_CallJava()->method() != nullptr &&
      sfpt->as_CallJava()->method()->is_object_constructor() && bottom_type()->is_inlinetypeptr() &&
      sfpt->in(TypeFunc::Parms) == this) {
    // Receiver is always buffered because it's passed as oop, see special case in CompiledEntrySignature::compute_calling_conventions().
    assert(is_allocated(igvn), "receiver must be allocated");
    return;
  }

  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an IsInit field that needs
  // to be checked before using the field values.
  const TypeInt* tinit = igvn->type(get_is_init())->isa_int();
  if (tinit != nullptr && !tinit->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset_in_bytes();
    Node* value = field_value_by_offset(offset, true /* include flat inline type fields */);
    if (value->is_InlineType()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = nullptr;
  bool is_array = ary_type != nullptr;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flat inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != nullptr, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), field_is_null_free(i), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = make_default_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      value = make_from_flat_impl(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != nullptr);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != nullptr, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != nullptr, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), null_free, visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == nullptr) {
    holder = inline_klass();
  }
  holder_offset -= inline_klass()->first_field_offset();
  store(kit, base, ptr, holder, holder_offset, -1, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, int offsetOnly, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    if (offsetOnly != -1 && offsetOnly != field_offset(i)) continue;
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      value->as_InlineType()->store_flat(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

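// Returns a buffered version of this inline type, allocating and initializing a new
// buffer if it is not already buffered and not null.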
InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace, bool must_init) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    if (vk->is_initialized() && (vk->is_empty() || (is_default(&kit->gvn()) && !is_larval(&kit->gvn()) && !is_larval()))) {
      // Don't buffer an empty or default inline type, use the default oop instead
      oop->init_req(3, default_oop(kit->gvn(), vk));
    } else {
      // Allocate and initialize buffer, re-execute on deoptimization.
      kit->jvms()->set_bci(kit->bci());
      kit->jvms()->set_should_reexecute(true);
      kit->kill_dead_locals();
      Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
      Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);

      if (must_init) {
        // Either this is not a larval, or it is a larval receiver on which we are about to invoke an abstract
        // value class constructor or the Object constructor which is not inlined. It is therefore escaping, and
        // we must initialize the buffer because we have not done this yet for larvals (see the else case).
        store(kit, alloc_oop, alloc_oop, vk);

        // Do not let stores that initialize this buffer be reordered with a subsequent
        // store that would make this buffer accessible by other threads.
        AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
        assert(alloc != nullptr, "must have an allocation node");
        kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
      } else {
        // We do not need to initialize the buffer because a larval could still be updated, which would create a new
        // buffer. Once the larval escapes, we will initialize the buffer (with must_init set).
        assert(is_larval(), "only larvals can possibly skip the initialization of their buffer");
      }
      oop->init_req(3, alloc_oop);
    }
    region->init_req(3, kit->control());
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

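// Checks if this inline type is known to be buffered: either the is_buffered input is a
// constant 1 or the oop is known to be non-null.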
bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = nullptr;
    if (con == TypeFunc::Parms) {
      field = get_oop();
    } else if (con == (call->tf()->range_cc()->cnt() - 1)) {
      field = get_is_init();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
      int extra = 0;
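      // T_LONG and T_DOUBLE fields occupy two projection slots, so count the preceding
      // ones to map the projection index back to a field index.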
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset_in_bytes(), true);
    }
    if (field != nullptr) {
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}

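// Ensures that all non-flat inline type fields of this inline type are buffered,
// recursing into flat fields.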
Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
     Node* value = field_value(i);
     if (field_is_flat(i)) {
       // Flat inline type field
       vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
     } else if (value->is_InlineType()) {
       // Non-flat inline type field
       vt->set_field_value(i, value->as_InlineType()->buffer(kit));
     }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

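// Performs local optimizations: uses the pre-allocated default oop, collapses a non-null
// InlineType oop input, uses the base oop when the fields are loaded from memory, and
// removes redundant buffer re-allocations during IGVN.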
Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  const TypeInt* tinit = phase->type(get_is_init())->isa_int();
  if ((tinit != nullptr && tinit->is_con(1)) &&
      ((is_default(phase) && !is_larval(phase) && !is_larval()) || inline_klass()->is_empty()) &&
      inline_klass()->is_initialized() &&
      (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for null-free default or empty inline types
    set_oop(*phase, default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use base oop if fields are loaded from memory
  Node* base = is_loaded(phase);
  if (base != nullptr && get_oop() != base && !phase->type(base)->maybe_null()) {
    set_oop(*phase, base);
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  bool use_default_oop = vk->is_empty() && vk->is_initialized() && null_free;
  Node* oop = use_default_oop ? default_oop(gvn, vk) : gvn.zerocon(T_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, null_free);
  vt->set_is_buffered(gvn, use_default_oop);
  vt->set_is_init(gvn);
  return vt;
}

Node* InlineTypeNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}

InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_default_impl(gvn, vk, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_default_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
  // Create a new InlineTypeNode with default values
  Node* oop = vk->is_initialized() && !is_larval ? default_oop(gvn, vk) : gvn.zerocon(T_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
  vt->set_is_buffered(gvn, vk->is_initialized() && !is_larval);
  vt->set_is_init(gvn);
  vt->set_is_larval(is_larval);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_default_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_default(&gvn), "must be the default inline type");
  return vt;
}

bool InlineTypeNode::is_default(PhaseGVN* gvn) const {
  const TypeInt* tinit = gvn->type(get_is_init())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the default value
      if (!value->is_InlineType() || !value->as_InlineType()->is_default(gvn)) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_is_init())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    }
    if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, null_free, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free, GrowableArray<ciType*>& visited, bool is_larval) {
  PhaseGVN& gvn = kit->gvn();

  if (!is_larval && vk->is_empty() && null_free) {
    InlineTypeNode* def = make_default_impl(gvn, vk, visited);
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    // TODO 8335256 Re-enable assert and fix OSR code
    // Issue triggers with TestValueConstruction.java and -XX:Tier0BackedgeNotifyFreqLog=0 -XX:Tier2BackedgeNotifyFreqLog=0 -XX:Tier3BackedgeNotifyFreqLog=0 -XX:Tier2BackEdgeThreshold=1 -XX:Tier3BackEdgeThreshold=1 -XX:Tier4BackEdgeThreshold=1 -Xbatch -XX:-TieredCompilation
    // assert(!is_larval || oop->as_InlineType()->is_larval(), "must be larval");
    if (is_larval && !oop->as_InlineType()->is_larval()) {
      vt = oop->clone()->as_InlineType();
      vt->set_is_larval(true);
      return gvn.transform(vt)->as_InlineType();
    }
    return oop->as_InlineType();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default_impl(gvn, vk, visited);
      } else {
        vt = make_null_impl(gvn, vk, visited);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, null_free);
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->set_is_larval(is_larval);
    vt->load(kit, not_null_oop, not_null_oop, vk, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = nullptr;
      if (null_free) {
        null_vt = make_default_impl(gvn, vk, visited);
      } else {
        null_vt = make_null_impl(gvn, vk, visited);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      if (!null_free) {
        vt->set_oop(gvn, oop);
      }
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->set_is_larval(is_larval);
    vt->load(kit, oop, oop, vk, visited);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn) || (null_free && !vk->is_initialized()), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, obj, ptr, holder, holder_offset, decorators, visited);
}

// GraphKit wrapper for the 'load' method
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flat inline type field at 'holder_offset' or from an inline type array.
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  // The inline type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);
  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(kit->gvn(), oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

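// Creates a larval copy of this inline type and, if requested, buffers it into a new
// allocation that is marked as larval.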
InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(kit->gvn(), alloc_oop);
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

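// Finishes the larval phase: clears the larval bit in the markWord of the buffer and
// returns a non-larval copy of this inline type.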
InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(nullptr, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj);
  assert(alloc != nullptr, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

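// Checks if this inline type is backed by an allocation that is still in its larval state.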
bool InlineTypeNode::is_larval(PhaseGVN* gvn) const {
  if (!is_allocated(gvn)) {
    return false;
  }

  Node* oop = get_oop();
  AllocateNode* alloc = AllocateNode::Ideal_allocation(oop);
  return alloc != nullptr && alloc->_larval;
}

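// Checks if all field values of this inline type are loaded from memory relative to the
// same base oop, matching the inline type layout. Returns the base oop in that case,
// nullptr otherwise.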
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (is_larval() || is_larval(phase)) {
    return nullptr;
  }
  if (vk == nullptr) {
    vk = inline_klass();
  }
  if (field_count() == 0 && vk->is_initialized()) {
    const TypeInt* tinit = phase->type(get_is_init())->isa_int();
    if (tinit != nullptr && tinit->is_con(1)) {
      assert(is_allocated(phase), "must be allocated");
      return get_oop();
    } else {
      // TODO 8284443
      return nullptr;
    }
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flat(i) && vt->is_InlineType()) {
        // Check inline type field load recursively
        base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == nullptr) {
          return nullptr;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
        return nullptr;
      } else if (base == nullptr) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
          return nullptr;
        }
      }
    } else {
      return nullptr;
    }
  }
  return base;
}

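// Returns the klass pointer of 'vk' as a long constant with the least significant bit set
// (a tagged klass pointer).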
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

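// Passes the field values of this inline type to the given call or return node by
// initializing its inputs, scalarizing flat fields recursively.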
void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
    } else {
      if (arg->is_InlineType()) {
        // Non-flat inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

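// Initializes the field values of this inline type from the inputs/projections of the
// given multi node (Start or Call), adding a null check for nullable inline types.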
void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = nullptr;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on the null check
    assert(null_check_region == nullptr, "already set");
    if (is_init == nullptr) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      is_init->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = nullptr;
    if (field_is_flat(i)) {
      // Flat inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flat inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != nullptr) {
          // We limit scalarization for inline types with circular fields and can therefore observe nodes
          // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
          // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
          if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
            parm = parm->as_InlineType()->get_oop();
          }
          // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
        }
        if (visited.contains(type)) {
          kit->C->set_has_circular_inline_type(true);
        } else if (!parm->is_InlineType()) {
          int old_len = visited.length();
          visited.push(type);
          parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), field_is_null_free(i), visited);
          visited.trunc_to(old_len);
        }
      }
      base_input += type->size();
    }
    assert(parm != nullptr, "should never be null");
    assert(field_value(i) == nullptr, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    gvn.record_for_igvn(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  // TODO 8332886 Really needed? GVN is disabled anyway.
  if (is_larval()) {
    return;
  }
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == nullptr || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      assert((!is_default(igvn) || !inline_klass()->is_initialized()) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }
}

InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_null_impl(gvn, vk, visited, transform);
}

InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
  vt->set_is_buffered(gvn);
  vt->set_is_init(gvn, false);
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      value = make_null_impl(gvn, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  return transform ? gvn.transform(vt)->as_InlineType() : vt;
}

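// Clones this node if it is used by nodes other than the given safepoint map, so that the
// caller can safely modify the returned node in place.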
InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
  if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
    return clone()->as_InlineType();
  }
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    if (fast_out(i) != map) {
      return clone()->as_InlineType();
    }
  }
  gvn->hash_delete(this);
  return this;
}

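// Computes the type of this node: filters the oop type through the declared type, never
// narrowing the InlineType to a constant, and joins with not-null if the IsInit input is
// a constant 1.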
const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
  const Type* tinit = phase->type(in(IsInit));
  if (tinit == Type::TOP) {
    return Type::TOP;
  }
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}

#ifndef PRODUCT
void InlineTypeNode::dump_spec(outputStream* st) const {
  if (_is_larval) {
    st->print(" #larval");
  }
}
#endif // NOT PRODUCT