1 /*
   2  * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciInlineKlass.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/castnode.hpp"
  31 #include "opto/graphKit.hpp"
  32 #include "opto/inlinetypenode.hpp"
  33 #include "opto/rootnode.hpp"
  34 #include "opto/phaseX.hpp"
  35 
// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
// If 'is_init' is true, the merged value is known to be non-null and the IsInit input is
// set to the constant 1 instead of a phi. The caller fills in the remaining phi inputs
// via merge_with()/add_new_path().
InlineTypeBaseNode* InlineTypeBaseNode::clone_with_phis(PhaseGVN* gvn, Node* region, bool is_init) {
  InlineTypeBaseNode* vt = clone()->as_InlineTypeBase();
  if (vt->is_InlineTypePtr()) {
    // Use nullable type
    const Type* t = Type::get_const_type(inline_klass());
    gvn->set_type(vt, t);
    vt->as_InlineTypePtr()->set_type(t);
  }

  // Create a PhiNode for merging the oop values
  const Type* phi_type = Type::get_const_type(inline_klass());
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), phi_type);
  gvn->set_type(oop, phi_type);
  gvn->record_for_igvn(oop);
  vt->set_oop(oop);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    // Merged value is known to be non-null, no phi needed
    is_init_node = gvn->intcon(1);
  } else {
    phi_type = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), phi_type);
    gvn->set_type(is_init_node, phi_type);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    if (value->is_InlineTypeBase()) {
      // Handle inline type fields recursively
      value = value->as_InlineTypeBase()->clone_with_phis(gvn, region);
    } else {
      phi_type = Type::get_const_type(type);
      value = PhiNode::make(region, value, phi_type);
      gvn->set_type(value, phi_type);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->set_type(vt, vt->bottom_type());
  gvn->record_for_igvn(vt);
  return vt;
}
  85 
  86 // Checks if the inputs of the InlineTypeBaseTypeNode were replaced by PhiNodes
  87 // for the given region (see InlineTypeBaseTypeNode::clone_with_phis).
  88 bool InlineTypeBaseNode::has_phi_inputs(Node* region) {
  89   // Check oop input
  90   bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
  91 #ifdef ASSERT
  92   if (result) {
  93     // Check all field value inputs for consistency
  94     for (uint i = Values; i < field_count(); ++i) {
  95       Node* n = in(i);
  96       if (n->is_InlineTypeBase()) {
  97         assert(n->as_InlineTypeBase()->has_phi_inputs(region), "inconsistent phi inputs");
  98       } else {
  99         assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
 100       }
 101     }
 102   }
 103 #endif
 104   return result;
 105 }
 106 
 107 // Check if all inline type fields have inline type node values
 108 bool InlineTypeBaseNode::can_merge() {
 109   for (uint i = 0; i < field_count(); ++i) {
 110     ciType* type = field_type(i);
 111     Node* val = field_value(i);
 112     if (type->is_inlinetype() &&
 113         (!val->is_InlineTypeBase() || !val->as_InlineTypeBase()->can_merge())) {
 114       return false;
 115     }
 116   }
 117   return true;
 118 }
 119 
// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'.
// 'pnum' is the phi input index of the merge path being filled in. If 'transform' is
// true, the updated phis are GVN-transformed and re-installed as inputs.
InlineTypeBaseNode* InlineTypeBaseNode::merge_with(PhaseGVN* gvn, const InlineTypeBaseNode* other, int pnum, bool transform) {
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(gvn->transform(phi));
  }

  // Merge is_init inputs. IsInit is only a phi if the merged value may be null
  // (see clone_with_phis); otherwise it must be the constant 1.
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineTypeBase()) {
      // Handle inline type fields recursively
      val1->as_InlineTypeBase()->merge_with(gvn, val2->as_InlineTypeBase(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}
 156 
// Adds a new merge path to an inline type node with phi inputs by appending a
// (yet unset) NULL input to each phi. The caller is responsible for setting the
// inputs of the new path afterwards (see merge_with).
void InlineTypeBaseNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  // Extend the oop phi
  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  // Extend the is_init phi
  phi = get_is_init()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  // Extend the field value phis
  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineTypeBase()) {
      // Handle inline type fields recursively
      val->as_InlineTypeBase()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(NULL);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}
 179 
// Returns the value of the field at 'index', stored as input at (Values + index).
Node* InlineTypeBaseNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}
 184 
// Get the value of the field at the given offset.
// If 'recursive' is true, flattened inline type fields will be resolved recursively.
Node* InlineTypeBaseNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flattened inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset in flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != NULL, "field value not found");
  if (recursive && value->is_InlineTypeBase()) {
    if (field_is_flattened(index)) {
      // Flattened inline type field: recurse into the embedded value with the
      // remaining offset, adjusted for the missing object header.
      InlineTypeBaseNode* vt = value->as_InlineTypeBase();
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      // Non-flattened inline type field: return the node itself
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineTypeBase()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}
 209 
// Sets the value of the field at 'index' (input at Values + index).
void InlineTypeBaseNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}
 214 
// Sets the value of the field starting exactly at 'offset' (asserts if no field
// starts at that offset, see field_index).
void InlineTypeBaseNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}
 218 
// Returns the offset of the declared non-static field at 'index'.
int InlineTypeBaseNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset();
}
 223 
 224 uint InlineTypeBaseNode::field_index(int offset) const {
 225   uint i = 0;
 226   for (; i < field_count() && field_offset(i) != offset; i++) { }
 227   assert(i < field_count(), "field not found");
 228   return i;
 229 }
 230 
// Returns the declared type of the non-static field at 'index'.
ciType* InlineTypeBaseNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}
 235 
// Returns true if the field at 'index' is flattened (embedded in the holder
// without an object header).
bool InlineTypeBaseNode::field_is_flattened(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  // Only inline type fields can be flattened
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flattened();
}
 242 
// Returns true if the field at 'index' is null-free (declared to never hold null).
bool InlineTypeBaseNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  // Only inline type fields can be flattened
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}
 249 
// Replaces the debug-info uses of 'this' in the given safepoint by a
// SafePointScalarObjectNode plus the individual field values (appended to the
// safepoint inputs), so the inline type can be re-allocated on deoptimization.
// Inline type field values are pushed on 'worklist' for later scalarization.
void InlineTypeBaseNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != NULL, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(inline_ptr(),
#ifdef ASSERT
                                                                  NULL,
#endif
                                                                  first_ind, nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an IsInit field that needs
  // to be checked before using the field values.
  if (!igvn->type(get_is_init())->is_int()->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    // Known non-null, no IsInit check needed
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset();
    Node* value = field_value_by_offset(offset, true /* include flattened inline type fields */);
    if (value->is_InlineTypeBase()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  // Redirect all debug-info edges referencing 'this' to the scalarized object
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != NULL && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}
 291 
// Scalarizes this inline type in all safepoints that use it for debug info.
// If the oop is constant or already loaded (and 'allow_oop' is set), the oop
// itself is used instead, avoiding field loads kept live only for debug info.
void InlineTypeBaseNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = allow_oop && (is_InlineTypePtr() || is_allocated(igvn)) &&
                 (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  // Collect all safepoint uses, looking through ConstraintCasts
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      // Replace debug-info edges by the (cheap) oop
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != NULL && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flattened fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeBaseNode* vt = vt_worklist.at(i)->isa_InlineTypeBase();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    // No users left, let IGVN remove this node
    igvn->_worklist.push(this);
  }
}
 340 
 341 const TypePtr* InlineTypeBaseNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
 342   const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
 343   const TypePtr* adr_type = NULL;
 344   bool is_array = ary_type != NULL;
 345   if ((decorators & C2_MISMATCHED) != 0) {
 346     adr_type = TypeRawPtr::BOTTOM;
 347   } else if (is_array) {
 348     // In the case of a flattened inline type array, each field has its own slice
 349     adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
 350   } else {
 351     ciField* field = holder->get_field_by_offset(offset, false);
 352     assert(field != NULL, "field not found");
 353     adr_type = gvn.C->alias_type(field)->adr_type();
 354   }
 355   return adr_type;
 356 }
 357 
// Initializes this inline type node by loading all of its field values from
// memory (or from constants) and installing them as field inputs.
// 'base'/'ptr' address the holder object, 'holder_offset' is the offset of this
// inline type within the holder.
void InlineTypeBaseNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = NULL;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = InlineTypeNode::make_default(kit->gvn(), ft->as_inline_klass());
    } else if (field_is_flattened(i)) {
      // Recursively load the flattened inline type field
      value = InlineTypeNode::make_from_flattened(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != NULL);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != NULL, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != NULL, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        if (is_array) {
          decorators |= IS_ARRAY;
        }
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, decorators);
      }
      // Loading a non-flattened inline type from memory
      if (ft->is_inlinetype()) {
        value = InlineTypeNode::make_from_oop(kit, value, ft->as_inline_klass(), null_free);
      }
    }
    set_field_value(i, value);
  }
}
 411 
 412 void InlineTypeBaseNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
 413   if (kit->gvn().type(base)->isa_aryptr()) {
 414     kit->C->set_flattened_accesses();
 415   }
 416   // The inline type is embedded into the object without an oop header. Subtract the
 417   // offset of the first field to account for the missing header when storing the values.
 418   if (holder == NULL) {
 419     holder = inline_klass();
 420   }
 421   holder_offset -= inline_klass()->first_field_offset();
 422   store(kit, base, ptr, holder, holder_offset, decorators);
 423 }
 424 
// Writes all field values of this inline type to memory at their offsets in
// the holder object addressed by 'base'/'ptr'. Flattened inline type fields
// are stored recursively.
void InlineTypeBaseNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flattened(i)) {
      // Recursively store the flattened inline type field
      if (!value->is_InlineTypeBase()) {
        // Field value is an oop; scalarize it first so it can be stored flat
        value = InlineTypeNode::make_from_oop(kit, value, ft->as_inline_klass());
      }
      value->as_InlineTypeBase()->store_flattened(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      const TypeAryPtr* ary_type = kit->gvn().type(base)->isa_aryptr();
      if (ary_type != NULL) {
        decorators |= IS_ARRAY;
      }
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, decorators);
    }
  }
}
 452 
// Makes sure this inline type is buffered (heap-allocated), allocating and
// initializing a buffer if needed. Returns an InlineTypePtrNode carrying the
// resulting oop. If 'safe_for_replace' is true, 'this' is replaced by the
// result in the map.
InlineTypePtrNode* InlineTypeBaseNode::buffer(GraphKit* kit, bool safe_for_replace) {
  assert(is_InlineType(), "sanity");

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypePtrNode* ptr = as_ptr(&kit->gvn(), false);
    if (safe_for_replace) {
      kit->replace_in_map(this, ptr);
    }
    return ptr;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  // Merge three paths: (1) already buffered, (2) null, (3) newly allocated buffer
  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, inline_ptr()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  int bci = kit->bci();
  bool reexecute = kit->jvms()->should_reexecute();
  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");

    // Allocate and initialize buffer
    PreserveJVMState pjvms(kit);
    // Propagate re-execution state and bci
    kit->set_bci(bci);
    kit->jvms()->set_bci(bci);
    kit->jvms()->set_should_reexecute(reexecute);

    kit->kill_dead_locals();
    ciInlineKlass* vk = inline_klass();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true, this);
    store(kit, alloc_oop, alloc_oop, vk);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    assert(alloc != NULL, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

    region->init_req(3, kit->control());
    oop   ->init_req(3, alloc_oop);
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeBaseNode* vt = clone()->as_InlineTypeBase();
  vt->set_oop(res_oop);
  vt = kit->gvn().transform(vt)->as_InlineTypeBase();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt->as_ptr(&kit->gvn(), null_free);
}
 541 
 542 bool InlineTypeBaseNode::is_allocated(PhaseGVN* phase) const {
 543   Node* oop = get_oop();
 544   const Type* oop_type = (phase != NULL) ? phase->type(oop) : oop->bottom_type();
 545   return !oop_type->maybe_null();
 546 }
 547 
 548 InlineTypePtrNode* InlineTypeBaseNode::as_ptr(PhaseGVN* phase, bool null_free) const {
 549   assert(!null_free || is_allocated(phase), "must be allocated");
 550   if (is_InlineTypePtr()) {
 551     return as_InlineTypePtr();
 552   }
 553   return phase->transform(new InlineTypePtrNode(this, null_free))->as_InlineTypePtr();
 554 }
 555 
// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeBaseNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C, bool null_free) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = NULL;
    if (con == TypeFunc::Parms) {
      // First projection is the oop
      field = get_oop();
    } else if (!null_free && con == (call->tf()->range_cc()->cnt() - 1)) {
      // Last projection of a nullable return is the IsInit value
      field = get_is_init();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
      // T_LONG and T_DOUBLE results occupy an extra (half) slot in the return
      // calling convention; count them to map the projection back to a field index.
      int extra = 0;
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset(), true);
      if (field->is_InlineType()) {
        // Inline type fields are returned as oops
        assert(field->as_InlineType()->is_allocated(&kit->gvn()), "must be allocated");
        field = field->as_InlineType()->get_oop();
      }
    }
    if (field != NULL) {
      // Replace the projection by the field value and disconnect it
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}
 595 
// Returns a clone of this inline type in which all non-flattened inline type
// field values are buffered (allocated) and flattened fields are processed
// recursively. Replaces 'this' by the clone in the map.
Node* InlineTypeBaseNode::allocate_fields(GraphKit* kit) {
  InlineTypeBaseNode* vt = clone()->as_InlineTypeBase();
  for (uint i = 0; i < field_count(); i++) {
     Node* value = field_value(i);
     if (field_is_flattened(i)) {
       // Flattened inline type field
       vt->set_field_value(i, value->as_InlineTypeBase()->allocate_fields(kit));
     } else if (value->is_InlineType()) {
       // Non-flattened inline type field
       vt->set_field_value(i, value->as_InlineType()->buffer(kit));
     }
  }
  vt = kit->gvn().transform(vt)->as_InlineTypeBase();
  kit->replace_in_map(this, vt);
  return vt;
}
 612 
// Idealization: forwards the IsInit input through an InlineTypePtr and, if the
// oop input is an InlineTypePtr known to be non-null, absorbs its oop and field
// values directly. Returns 'this' if the node was modified, NULL otherwise.
Node* InlineTypeBaseNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* is_init = get_is_init();
  if (is_init->isa_InlineTypePtr()) {
    // Forward the IsInit value of the wrapped inline type
    set_req(IsInit, is_init->as_InlineTypePtr()->get_is_init());
    return this;
  }
  Node* oop = get_oop();
  if (oop->isa_InlineTypePtr() && !phase->type(oop)->maybe_null()) {
    // Take over the oop and field values of the non-null InlineTypePtr
    InlineTypePtrNode* vtptr = oop->as_InlineTypePtr();
    set_oop(vtptr->get_oop());
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }
  return NULL;
}
 631 
 632 InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk) {
 633   // Create a new InlineTypeNode with uninitialized values and NULL oop
 634   Node* oop = (vk->is_empty() && vk->is_initialized()) ? default_oop(gvn, vk) : gvn.zerocon(T_PRIMITIVE_OBJECT);
 635   InlineTypeNode* vt = new InlineTypeNode(vk, oop);
 636   vt->set_is_init(gvn);
 637   return vt;
 638 }
 639 
// Returns the constant oop of the default inline type allocation of 'vk'.
Node* InlineTypeBaseNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}
 644 
 645 InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk) {
 646   // Create a new InlineTypeNode with default values
 647   Node* oop = vk->is_initialized() ? default_oop(gvn, vk) : gvn.zerocon(T_PRIMITIVE_OBJECT);
 648   InlineTypeNode* vt = new InlineTypeNode(vk, oop);
 649   vt->set_is_init(gvn);
 650   for (uint i = 0; i < vt->field_count(); ++i) {
 651     ciType* field_type = vt->field_type(i);
 652     Node* value = gvn.zerocon(field_type->basic_type());
 653     if (field_type->is_inlinetype()) {
 654       ciInlineKlass* vk = field_type->as_inline_klass();
 655       if (vt->field_is_null_free(i)) {
 656         value = make_default(gvn, vk);
 657       } else {
 658         value = InlineTypePtrNode::make_null(gvn, vk);
 659       }
 660     }
 661     vt->set_field_value(i, value);
 662   }
 663   vt = gvn.transform(vt)->as_InlineType();
 664   assert(vt->is_default(&gvn), "must be the default inline type");
 665   return vt;
 666 }
 667 
 668 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
 669   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT));
 670   vt->set_req(IsInit, gvn.intcon(0));
 671   for (uint i = 0; i < vt->field_count(); i++) {
 672     ciType* field_type = vt->field_type(i);
 673     Node* value = gvn.zerocon(field_type->basic_type());
 674     if (field_type->is_inlinetype()) {
 675       if (vt->field_is_null_free(i)) {
 676         value = InlineTypeNode::make_null(gvn, field_type->as_inline_klass());
 677       } else {
 678         value = InlineTypePtrNode::make_null(gvn, field_type->as_inline_klass());
 679       }
 680     }
 681     vt->set_field_value(i, value);
 682   }
 683   return gvn.transform(vt)->as_InlineType();
 684 }
 685 
// Returns true if this node is known to represent the default (all-zero)
// instance of its inline klass: IsInit is the constant 1 and every field value
// is zero/null (or, for null-free inline type fields, itself the default).
bool InlineTypeBaseNode::is_default(PhaseGVN* gvn) const {
  const Type* tinit = gvn->type(in(IsInit));
  if (!tinit->isa_int() || !tinit->is_int()->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (value->is_InlineTypePtr()) {
      // Compare the underlying oop of a buffered field value
      value = value->as_InlineTypePtr()->get_oop();
    }
    if (!gvn->type(value)->is_zero_type() &&
        !(field_is_null_free(i) && value->is_InlineType() && value->as_InlineType()->is_default(gvn))) {
      return false;
    }
  }
  return true;
}
 703 
// Creates an inline type node from an oop by loading all field values from the
// heap-allocated instance and keeping the oop. Emits a null check if the oop may
// be null; the null and non-null paths are merged with phis. If 'null_free' is
// true, a null oop produces the default instance instead of the null value.
InlineTypeBaseNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free) {
  PhaseGVN& gvn = kit->gvn();

  if (vk->is_empty() && null_free) {
    // No fields to load, just use the default instance
    InlineTypeNode* def = make_default(gvn, vk);
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeBaseNode* vt = NULL;

  if (oop->isa_InlineTypePtr()) {
    // Already scalarized, nothing to do
    return oop->as_InlineTypePtr();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default(gvn, vk);
      } else {
        vt = InlineTypePtrNode::make_null(gvn, vk);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    // Load the field values on the non-null path
    vt = new InlineTypePtrNode(vk, not_null_oop, null_free);
    vt->set_is_init(gvn);
    vt->load(kit, not_null_oop, not_null_oop, vk, /* holder_offset */ 0);

    if (null_ctl != kit->top()) {
      // Merge the non-null result with the value for the null path
      InlineTypeBaseNode* null_vt = NULL;
      if (null_free) {
        null_vt = make_default(gvn, vk)->as_ptr(&gvn);
      } else {
        null_vt = InlineTypePtrNode::make_null(gvn, vk);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);

      vt = vt->clone_with_phis(&gvn, region);
      vt->merge_with(&gvn, null_vt, 2, true);
      if (!null_free) {
        vt->set_oop(oop);
      }
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypePtrNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_init(gvn);
    vt->load(kit, oop, oop, vk, /* holder_offset */ 0);
// TODO fix with JDK-8278390
//    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineTypePtr ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != NULL || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(!null_free || vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineTypeBase();
}
 769 
 770 // GraphKit wrapper for the 'make_from_flattened' method
 771 InlineTypeNode* InlineTypeNode::make_from_flattened(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
 772   if (kit->gvn().type(obj)->isa_aryptr()) {
 773     kit->C->set_flattened_accesses();
 774   }
 775   // Create and initialize an InlineTypeNode by loading all field values from
 776   // a flattened inline type field at 'holder_offset' or from an inline type array.
 777   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
 778   // The inline type is flattened into the object without an oop header. Subtract the
 779   // offset of the first field to account for the missing header when loading the values.
 780   holder_offset -= vk->first_field_offset();
 781   vt->load(kit, obj, ptr, holder, holder_offset, decorators);
 782   assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
 783   return kit->gvn().transform(vt)->as_InlineType();
 784 }
 785 
 786 InlineTypeBaseNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
 787   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
 788   if (!in) {
 789     // Keep track of the oop. The returned inline type might already be buffered.
 790     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
 791     vt->set_oop(oop);
 792   }
 793   vt->initialize_fields(kit, multi, base_input, in, null_free);
 794   return kit->gvn().transform(vt)->as_InlineTypeBase();
 795 }
 796 
// Create a larval (not yet fully initialized) copy of this inline type,
// optionally buffering it into a new heap allocation that is marked larval.
InlineTypeNode* InlineTypeBaseNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = InlineTypeNode::make_uninitialized(kit->gvn(), vk);
  // Copy all inputs (oop, IsInit, field values) to the new node
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    // Mark the allocation as larval so it is not treated as a finished buffer
    alloc->_larval = true;

    // Write the current field values to the new buffer and remember its oop
    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(alloc_oop);
  }
  // The resulting node carries the larval variant of the inline type
  res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}
 821 
// Finish the larval phase of this buffered inline type: clear the larval bit
// in the buffer's mark word and return a copy of this node with the
// non-larval type.
InlineTypeNode* InlineTypeBaseNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  // Load the mark word, clear the larval bit and store it back
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj, &kit->gvn());
  assert(alloc != NULL, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  // Create a copy of this node with all inputs preserved but the non-larval type
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = InlineTypeNode::make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}
 844 
// Check if all field values of this inline type are loads from one unique
// base oop with offsets matching the inline type's field layout. If so, the
// value is effectively already "loaded" from that buffer and the base oop is
// returned; otherwise returns NULL. 'vk', 'base' and 'holder_offset' are used
// for the recursive check of flattened fields.
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == NULL) {
    vk = inline_klass();
  }
  if (field_count() == 0 && vk->is_initialized()) {
    // No fields to match, rely on the IsInit input
    const Type* tinit = phase->type(in(IsInit));
    if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
      assert(is_allocated(phase), "must be allocated");
      return get_oop();
    } else {
      // TODO 8284443
      return NULL;
    }
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineTypeBase()) {
      InlineTypeBaseNode* vt = value->as_InlineTypeBase();
      if (vt->type()->inline_klass()->is_empty()) {
        // Empty inline type field: nothing is loaded for it
        continue;
      } else if (field_is_flattened(i) && vt->is_InlineType()) {
        // Check inline type field load recursively
        base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == NULL) {
          return NULL;
        }
        continue;
      } else {
        // Non-flattened field: match the load of its buffer oop below
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of field load matches inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == NULL || (lbase != base && base != NULL) || loffset != offset) {
        return NULL;
      } else if (base == NULL) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == NULL || !vtptr->instance_klass()->equals(vk)) {
          return NULL;
        }
      }
    } else {
      // Field value is not a load from memory
      return NULL;
    }
  }
  return base;
}
 905 
 906 Node* InlineTypeBaseNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
 907   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
 908   intptr_t bits = tk->get_con();
 909   set_nth_bit(bits, 0);
 910   return gvn.longcon((jlong)bits);
 911 }
 912 
// Pass the scalarized field values of this inline type as inputs of 'n'
// (a call or return), starting at input index 'base_input' which is updated
// as slots are filled. 'in' distinguishes incoming arguments from outgoing
// return values; '!null_free' adds IsInit information for nullable values.
void InlineTypeBaseNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    // Nullable incoming value: pass the IsInit information first
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flattened(i)) {
      // Flattened inline type field
      // (recursive call relies on the default for 'null_free' because a
      // flattened field is always null-free)
      arg->as_InlineTypeBase()->pass_fields(kit, n, base_input, in);
    } else {
      if (arg->is_InlineType()) {
        // Non-flattened inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        // Long and double fields occupy two input slots
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}
 941 
// Initialize the field values of this inline type from the inputs or
// projections of 'multi' (a Start or Call node), starting at 'base_input'
// which is updated as slots are consumed. For nullable values, IsInit is
// read and a null-check region is created so that field values are replaced
// by NULL when the holder is null (avoiding loads from uninitialized memory).
void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = NULL;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on
    assert(null_check_region == NULL, "already set");
    if (is_init == NULL) {
      // Will only be initialized below, use dummy node for now
      // (replaced by the real projection at the end of this method)
      is_init = new Node(1);
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    // Region merging the non-null (req 1) and null (req 2) paths
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = NULL;
    if (field_is_flattened(i)) {
      // Flattened inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region);
      parm = gvn.transform(vt);
    } else {
      // Read the field value from the appropriate place for the node kind
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flattened inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != NULL) {
          // Holder is nullable, set field to NULL if holder is NULL to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
        }
        parm = make_from_oop(kit, parm, type->as_inline_klass(), field_is_null_free(i));
      }
      base_input += type->size();
    }
    assert(parm != NULL, "should never be null");
    assert(field_value(i) == NULL, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    // Replace the dummy is_init node by the real projection and rewire its
    // single user ('cmp', the null check's comparison) to it
    Node* cmp = is_init->raw_out(0);
    is_init= gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    base_input++;
  }
}
1019 
// Replace a buffer allocation result 'res' by a dominating allocation 'dom':
// disconnects the initializing stores and GC barriers of 'res' and redirects
// all remaining uses to 'dom'.
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      // Bypass all stores through this address in the memory graph
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != NULL) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      // NOTE(review): this assumes the CastP2X always has an XorX user under
      // G1 (find_out_with would return NULL otherwise) — confirm invariant.
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      // Adjust iterator after the use was removed
      --i; --imax;
    }
  }
  // Redirect all remaining uses of 'res' to the dominating allocation
  igvn->replace_node(res, dom);
}
1047 
// Idealize this inline type node: use the pre-allocated oop for default
// values, reuse the base oop when all fields are loads from one buffer, and
// remove redundant re-allocations during IGVN.
Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (is_default(phase) && inline_klass()->is_initialized() &&
      (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for default inline types
    set_oop(default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }

  if (!is_allocated(phase)) {
    // Save base oop if fields are loaded from memory and the inline
    // type is not buffered (in this case we should not use the oop).
    Node* base = is_loaded(phase);
    if (base != NULL && !phase->type(base)->maybe_null()) {
      set_oop(base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();

    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != NULL && alloc->in(AllocateNode::InlineTypeNode) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != NULL && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineTypeNode, igvn->C->top());
            // Adjust iterator after the allocation was unlinked
            --i; --imax;
          }
        }
      }
    }
  }
  return InlineTypeBaseNode::Ideal(phase, can_reshape);
}
1094 
// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Also recursively processes inline type users and unlinks all remaining AllocateNode users.
void InlineTypeNode::remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase) {
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != NULL && alloc->in(AllocateNode::InlineTypeNode) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == NULL || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      assert((!is_default(igvn) || !inline_klass()->is_initialized()) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != NULL && alloc_other->in(AllocateNode::InlineTypeNode) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          // Keep the allocation whose control dominates the current candidate
          if (res_other != NULL && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }

  // Process users
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* out = fast_out(i);
    if (out->is_InlineType()) {
      // Recursively process inline type users
      igvn->rehash_node_delayed(out);
      out->as_InlineType()->remove_redundant_allocations(igvn, phase);
    } else if (out->isa_Allocate() != NULL) {
      // Unlink AllocateNode
      assert(out->in(AllocateNode::InlineTypeNode) == this, "should be linked");
      igvn->replace_input_of(out, AllocateNode::InlineTypeNode, igvn->C->top());
      // Adjust iterator after the allocation was unlinked
      --i; --imax;
    }
  }
}
1144 
1145 InlineTypePtrNode* InlineTypePtrNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
1146   InlineTypePtrNode* ptr = new InlineTypePtrNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1147   ptr->set_req(IsInit, gvn.intcon(0));
1148   for (uint i = 0; i < ptr->field_count(); i++) {
1149     ciType* field_type = ptr->field_type(i);
1150     Node* value = gvn.zerocon(field_type->basic_type());
1151     if (field_type->is_inlinetype()) {
1152       if (ptr->field_is_null_free(i)) {
1153         value = InlineTypeNode::make_null(gvn, field_type->as_inline_klass());
1154       } else {
1155         value = InlineTypePtrNode::make_null(gvn, field_type->as_inline_klass());
1156       }
1157     }
1158     ptr->set_field_value(i, value);
1159   }
1160   return gvn.transform(ptr)->as_InlineTypePtr();
1161 }
1162 
1163 Node* InlineTypePtrNode::Identity(PhaseGVN* phase) {
1164   if (get_oop()->is_InlineTypePtr()) {
1165     return get_oop();
1166   }
1167   return this;
1168 }
1169 
1170 const Type* InlineTypePtrNode::Value(PhaseGVN* phase) const {
1171   const Type* t = phase->type(get_oop())->filter_speculative(_type);
1172   if (t->singleton()) {
1173     // Don't replace InlineTypePtr by a constant
1174     t = _type;
1175   }
1176   const Type* tinit = phase->type(in(IsInit));
1177   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1178     t = t->join_speculative(TypePtr::NOTNULL);
1179   }
1180   return t;
1181 }