/*
 * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/phaseX.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, bool is_init) {
  InlineTypeNode* vt = clone()->as_InlineType();
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during IGVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || !gvn->is_IterGVN() || field_is_flattened(i);
    if (value->is_InlineType() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < req(); ++i) {
      Node* n = in(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(NULL);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flattened inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flattened inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset within the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != NULL, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flattened(index)) {
      // Flattened inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

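// Returns the index of the declared field with the given offset (linear search)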
uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flattened(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flattened();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != NULL, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
#ifdef ASSERT
                                                                  NULL,
#endif
                                                                  first_ind, nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an IsInit field that needs
  // to be checked before using the field values.
  if (!igvn->type(get_is_init())->is_int()->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset_in_bytes();
    Node* value = field_value_by_offset(offset, true /* include flattened inline type fields */);
    if (value->is_InlineType()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
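  // Replace all debug info uses of this inline type in the safepoint by the scalarized object description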
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != NULL && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = allow_oop && is_allocated(igvn) &&
                 (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != NULL && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flattened fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->as_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->_worklist.push(this);
  }
}

const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = NULL;
  bool is_array = ary_type != NULL;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flattened inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != NULL, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe
// nodes of the same type but with different scalarization depth during GVN. This method adjusts
// the scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flattened(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), field_is_null_free(i), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone()->as_InlineType();
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = NULL;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = make_default_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flattened(i)) {
      // Recursively load the flattened inline type field
      value = make_from_flattened_impl(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != NULL);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != NULL, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != NULL, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), null_free, visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flattened_accesses();
  }
  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == NULL) {
    holder = inline_klass();
  }
  holder_offset -= inline_klass()->first_field_offset();
  store(kit, base, ptr, holder, holder_offset, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flattened(i)) {
      // Recursively store the flattened inline type field
      value->as_InlineType()->store_flattened(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != NULL);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone()->as_InlineType();
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

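  // Merge the already-buffered (1), null (2), and newly-allocated (3) paths below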
  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  int bci = kit->bci();
  bool reexecute = kit->jvms()->should_reexecute();
  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");

    // Allocate and initialize buffer
    PreserveJVMState pjvms(kit);
    // Propagate re-execution state and bci
    kit->set_bci(bci);
    kit->jvms()->set_bci(bci);
    kit->jvms()->set_should_reexecute(reexecute);

    kit->kill_dead_locals();
    ciInlineKlass* vk = inline_klass();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true, this);
    store(kit, alloc_oop, alloc_oop, vk);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    assert(alloc != NULL, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

    region->init_req(3, kit->control());
    oop   ->init_req(3, alloc_oop);
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone()->as_InlineType();
  vt->set_oop(res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

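// Returns true if this inline type is known to be buffered, i.e. its is_buffered
// input is the constant 1 or its oop is provably non-null.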
bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != NULL) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C, bool null_free) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = NULL;
    if (con == TypeFunc::Parms) {
      field = get_oop();
    } else if (!null_free && con == (call->tf()->range_cc()->cnt() - 1)) {
      field = get_is_init();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
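      // Long and double fields use two slots in the result projections,
      // count the extra slots taken by the fields preceding 'field_nb'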
      int extra = 0;
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset_in_bytes(), true);
    }
    if (field != NULL) {
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}

Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone()->as_InlineType();
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flattened(i)) {
      // Flattened inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flattened inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != NULL) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (!is_larval(phase) &&
      is_default(phase) &&
      inline_klass()->is_initialized() &&
      (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for default inline types
    set_oop(default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }
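  // If the oop input is a non-null inline type itself, take over its oop and field values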
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }
  if (!is_allocated(phase)) {
    // Save base oop if fields are loaded from memory and the inline
    // type is not buffered (in this case we should not use the oop).
    Node* base = is_loaded(phase);
    if (base != NULL && !phase->type(base)->maybe_null()) {
      set_oop(base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != NULL && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != NULL && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return NULL;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and NULL oop
  Node* oop = (vk->is_empty() && vk->is_initialized()) ? default_oop(gvn, vk) : gvn.zerocon(T_PRIMITIVE_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, null_free);
  vt->set_is_buffered(gvn, vk->is_empty() && vk->is_initialized());
  vt->set_is_init(gvn);
  return vt;
}

Node* InlineTypeNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}

InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_default_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_default_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  // Create a new InlineTypeNode with default values
  Node* oop = vk->is_initialized() ? default_oop(gvn, vk) : gvn.zerocon(T_PRIMITIVE_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
  vt->set_is_buffered(gvn, vk->is_initialized());
  vt->set_is_init(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flattened(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_default_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_default(&gvn), "must be the default inline type");
  return vt;
}

bool InlineTypeNode::is_default(PhaseGVN* gvn) const {
  const Type* tinit = gvn->type(in(IsInit));
  if (!tinit->isa_int() || !tinit->is_int()->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    ciType* ft = field_type(i);
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      if (!value->is_InlineType() || !value->as_InlineType()->is_default(gvn)) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      value = value->as_InlineType()->get_oop();
    }
    if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, null_free, visited);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();

  if (vk->is_empty() && null_free) {
    InlineTypeNode* def = make_default_impl(gvn, vk, visited);
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = NULL;

  if (oop->isa_InlineType()) {
    return oop->as_InlineType();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default_impl(gvn, vk, visited);
      } else {
        vt = make_null_impl(gvn, vk, visited);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, null_free);
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->load(kit, not_null_oop, not_null_oop, vk, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = NULL;
      if (null_free) {
        null_vt = make_default_impl(gvn, vk, visited);
      } else {
        null_vt = make_null_impl(gvn, vk, visited);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);

      vt = vt->clone_with_phis(&gvn, region);
      vt->merge_with(&gvn, null_vt, 2, true);
      if (!null_free) {
        vt->set_oop(oop);
      }
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->load(kit, oop, oop, vk, visited);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != NULL || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(!null_free || vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flattened(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flattened_impl(kit, vk, obj, ptr, holder, holder_offset, decorators, visited);
}

// Implementation of 'make_from_flattened' that additionally tracks the visited types
// to limit the scalarization depth of circular inline types.
InlineTypeNode* InlineTypeNode::make_from_flattened_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flattened_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flattened inline type field at 'holder_offset' or from an inline type array.
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  // The inline type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);
  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, NULL, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
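    // Mark the allocation as larval, i.e. the buffer may still be written to while the object is being initialized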
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(alloc_oop);
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
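  // Clear the larval bit in the mark word to mark the buffer as a finished, immutable value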
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj, &kit->gvn());
  assert(alloc != NULL, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

bool InlineTypeNode::is_larval(PhaseGVN* gvn) const {
  if (!is_allocated(gvn)) {
    return false;
  }

  Node* oop = get_oop();
  AllocateNode* alloc = AllocateNode::Ideal_allocation(oop, gvn);
  return alloc != NULL && alloc->_larval;
}

Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == NULL) {
    vk = inline_klass();
  }
  if (field_count() == 0 && vk->is_initialized()) {
    const Type* tinit = phase->type(in(IsInit));
    if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
      assert(is_allocated(phase), "must be allocated");
      return get_oop();
    } else {
      // TODO 8284443
      return NULL;
    }
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flattened(i)) {
        // Check inline type field load recursively
        base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == NULL) {
          return NULL;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == NULL || (lbase != base && base != NULL) || loffset != offset) {
        return NULL;
      } else if (base == NULL) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == NULL || !vtptr->instance_klass()->equals(vk)) {
          return NULL;
        }
      }
    } else {
      return NULL;
    }
  }
  return base;
}

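// Returns the klass pointer of 'vk' as a constant with the lowest bit set. The tag
// distinguishes the klass pointer from a regular oop, which allows compiled code to
// recognize an inline type that is passed in scalarized (non-buffered) form.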
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flattened(i)) {
      // Flattened inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
    } else {
      if (arg->is_InlineType()) {
        // Non-flattened inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
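      // Long and double fields take two argument slots, fill the second one with top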
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = NULL;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on the null check
    assert(null_check_region == NULL, "already set");
    if (is_init == NULL) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = NULL;
    if (field_is_flattened(i)) {
      // Flattened inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flattened inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != NULL) {
          if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
            parm = parm->as_InlineType()->get_oop();
          }
          // Holder is nullable, set field to NULL if holder is NULL to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
        }
        if (visited.contains(type)) {
          kit->C->set_has_circular_inline_type(true);
        } else if (!parm->is_InlineType()) {
          int old_len = visited.length();
          visited.push(type);
          parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), field_is_null_free(i), visited);
          visited.trunc_to(old_len);
        }
      }
      base_input += type->size();
    }
    assert(parm != NULL, "should never be null");
    assert(field_value(i) == NULL, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
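    // The null check above compared against a dummy node, rewire it to the real IsInit projection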
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != NULL && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == NULL || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      assert((!is_default(igvn) || !inline_klass()->is_initialized()) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != NULL && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != NULL && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }
}

InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_null_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
  vt->set_is_buffered(gvn);
  vt->set_is_init(gvn, false);
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flattened(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      value = make_null_impl(gvn, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  return gvn.transform(vt)->as_InlineType();
}

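// If the oop input is an inline type node itself, this node is redundant and can be replaced by it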
Node* InlineTypeNode::Identity(PhaseGVN* phase) {
  if (get_oop()->is_InlineType()) {
    return get_oop();
  }
  return this;
}

const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
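  // Use the IsInit input to narrow the type further: TOP means this node is dead,
  // a constant 1 proves that the inline type is never null.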
  const Type* tinit = phase->type(in(IsInit));
  if (tinit == Type::TOP) {
    return Type::TOP;
  }
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
1259 }