/*
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/phaseX.hpp"

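// Note: clone_with_phis(), merge_with() and add_new_path() together implement
// phi-based merging of inline types at control flow joins: the first value
// reaching a region is cloned with phi inputs (one phi each for the oop, the
// is_init flag, and every field), and each further incoming value then fills
// in its phi edge for the corresponding region path.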
// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeBaseNode* InlineTypeBaseNode::clone_with_phis(PhaseGVN* gvn, Node* region) {
  InlineTypeBaseNode* vt = clone()->as_InlineTypeBase();
  if (vt->is_InlineTypePtr()) {
    // Use nullable type
    const Type* t = Type::get_const_type(inline_klass());
    gvn->set_type(vt, t);
    vt->as_InlineTypePtr()->set_type(t);
  }

  // Create a PhiNode for merging the oop values
  const Type* phi_type = Type::get_const_type(inline_klass());
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), phi_type);
  gvn->set_type(oop, phi_type);
  gvn->record_for_igvn(oop);
  vt->set_oop(oop);

  // Create a PhiNode for merging the is_init values
  phi_type = Type::get_const_basic_type(T_BOOLEAN);
  PhiNode* is_init = PhiNode::make(region, vt->get_is_init(), phi_type);
  gvn->set_type(is_init, phi_type);
  gvn->record_for_igvn(is_init);
  vt->set_req(IsInit, is_init);

  // Create a PhiNode for each field value to be merged
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    if (value->is_InlineTypeBase()) {
      // Handle inline type fields recursively
      value = value->as_InlineTypeBase()->clone_with_phis(gvn, region);
    } else {
      phi_type = Type::get_const_type(type);
      value = PhiNode::make(region, value, phi_type);
      gvn->set_type(value, phi_type);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->set_type(vt, vt->bottom_type());
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeBaseNode were replaced by PhiNodes
// for the given region (see InlineTypeBaseNode::clone_with_phis).
bool InlineTypeBaseNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < field_count(); ++i) {
      Node* n = in(i);
      if (n->is_InlineTypeBase()) {
        assert(n->as_InlineTypeBase()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Check if all inline type fields have inline type node values
bool InlineTypeBaseNode::can_merge() {
  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* val = field_value(i);
    if (type->is_inlinetype() &&
        (!val->is_InlineTypeBase() || !val->as_InlineTypeBase()->can_merge())) {
      return false;
    }
  }
  return true;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeBaseNode* InlineTypeBaseNode::merge_with(PhaseGVN* gvn, const InlineTypeBaseNode* other, int pnum, bool transform) {
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(gvn->transform(phi));
  }

  phi = get_is_init()->as_Phi();
  phi->set_req(pnum, other->get_is_init());
  if (transform) {
    set_req(IsInit, gvn->transform(phi));
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineTypeBase()) {
      val1->as_InlineTypeBase()->merge_with(gvn, val2->as_InlineTypeBase(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeBaseNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineTypeBase()) {
      val->as_InlineTypeBase()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(NULL);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeBaseNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flattened inline type fields will be resolved recursively.
Node* InlineTypeBaseNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flattened inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset within that flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != NULL, "field value not found");
  if (recursive && value->is_InlineType()) {
    InlineTypeNode* vt = value->as_InlineType();
    if (field_is_flattened(index)) {
      // Flattened inline type field
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return vt;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeBaseNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeBaseNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeBaseNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset();
}

uint InlineTypeBaseNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeBaseNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeBaseNode::field_is_flattened(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flattened();
}

bool InlineTypeBaseNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

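// Replaces the use of this inline type in the debug info of 'sfpt' by a
// SafePointScalarObjectNode and appends the field values to the safepoint
// inputs, so that deoptimization can re-allocate ("rematerialize") the buffer.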
void InlineTypeBaseNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != NULL, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(inline_ptr(),
#ifdef ASSERT
                                                                  NULL,
#endif
                                                                  first_ind, nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an is_init field that needs
  // to be checked before using the field values.
  if (!igvn->type(get_is_init())->is_int()->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset();
    Node* value = field_value_by_offset(offset, true /* include flattened inline type fields */);
    if (value->is_InlineTypeBase()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != NULL && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeBaseNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = allow_oop && (is_InlineTypePtr() || is_allocated(igvn)) &&
                 (oop->is_Con() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != NULL && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flattened fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeBaseNode* vt = vt_worklist.at(i)->isa_InlineTypeBase();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->_worklist.push(this);
  }
}

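// Computes the address type for accessing the field at 'offset'. In a flattened
// inline type array each field gets its own memory slice; mismatched accesses
// fall back to the raw pointer type.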
const TypePtr* InlineTypeBaseNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = NULL;
  bool is_array = ary_type != NULL;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flattened inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != NULL, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

void InlineTypeBaseNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = NULL;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (ft->is_inlinetype() && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = InlineTypeNode::make_default(kit->gvn(), ft->as_inline_klass());
    } else if (field_is_flattened(i)) {
      // Recursively load the flattened inline type field
      value = InlineTypeNode::make_from_flattened(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != NULL);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != NULL, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != NULL, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        if (is_array) {
          decorators |= IS_ARRAY;
        }
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, decorators);
      }
      // Loading a non-flattened inline type from memory
      if (ft->is_inlinetype()) {
        value = InlineTypeNode::make_from_oop(kit, value, ft->as_inline_klass(), null_free);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeBaseNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flattened_accesses();
  }
  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == NULL) {
    holder = inline_klass();
  }
  holder_offset -= inline_klass()->first_field_offset();
  store(kit, base, ptr, holder, holder_offset, decorators);
}

void InlineTypeBaseNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flattened(i)) {
      // Recursively store the flattened inline type field
      if (!value->is_InlineType()) {
        assert(!kit->gvn().type(value)->maybe_null(), "Inline types are null-free");
        value = InlineTypeNode::make_from_oop(kit, value, ft->as_inline_klass());
      }
      value->as_InlineType()->store_flattened(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      const TypeAryPtr* ary_type = kit->gvn().type(base)->isa_aryptr();
      if (ary_type != NULL) {
        decorators |= IS_ARRAY;
      }
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, decorators);
    }
  }
}

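// Makes sure this inline type is buffered (i.e. heap allocated). If the oop is
// already non-null it is used directly; otherwise a new instance is allocated,
// the field values are stored to it, and both paths are merged. Returns the
// buffered inline type as a pointer node.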
InlineTypePtrNode* InlineTypeBaseNode::buffer(GraphKit* kit, bool safe_for_replace) {
  assert(is_InlineType(), "sanity");
  // Check if inline type is already allocated
  Node* null_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &null_ctl);
  if (null_ctl->is_top()) {
    // Inline type is allocated
    return as_ptr(&kit->gvn());
  }
  assert(!is_allocated(&kit->gvn()), "should not be allocated");
  RegionNode* region = new RegionNode(3);

  // Oop is non-NULL, use it
  region->init_req(1, kit->control());
  PhiNode* oop = PhiNode::make(region, not_null_oop, inline_ptr()->join_speculative(TypePtr::NOTNULL));
  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  int bci = kit->bci();
  bool reexecute = kit->jvms()->should_reexecute();
  {
    // Oop is NULL, allocate and initialize buffer
    PreserveJVMState pjvms(kit);
    // Propagate re-execution state and bci
    kit->set_bci(bci);
    kit->jvms()->set_bci(bci);
    kit->jvms()->set_should_reexecute(reexecute);
    kit->set_control(null_ctl);
    kit->kill_dead_locals();
    ciInlineKlass* vk = inline_klass();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true, this);
    store(kit, alloc_oop, alloc_oop, vk);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    assert(alloc != NULL, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

    region->init_req(2, kit->control());
    oop   ->init_req(2, alloc_oop);
    io    ->init_req(2, kit->i_o());
    mem   ->init_req(2, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeBaseNode* vt = clone()->as_InlineTypeBase();
  vt->set_oop(res_oop);
  vt = kit->gvn().transform(vt)->as_InlineTypeBase();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt->as_ptr(&kit->gvn());
}

bool InlineTypeBaseNode::is_allocated(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* oop_type = (phase != NULL) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

InlineTypePtrNode* InlineTypeBaseNode::as_ptr(PhaseGVN* phase) const {
  assert(is_allocated(phase), "must be allocated");
  if (is_InlineTypePtr()) {
    return as_InlineTypePtr();
  }
  return phase->transform(new InlineTypePtrNode(this))->as_InlineTypePtr();
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeBaseNode::replace_call_results(GraphKit* kit, Node* call, Compile* C) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = NULL;
    if (con == TypeFunc::Parms) {
      field = get_oop();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
      int extra = 0;
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset(), true);
      if (field->is_InlineType()) {
        assert(field->as_InlineType()->is_allocated(&kit->gvn()), "must be allocated");
        field = field->as_InlineType()->get_oop();
      }
    }
    if (field != NULL) {
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}

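// Buffers all inline type field values (recursively for flattened fields) and
// returns an updated clone of this node with the buffered values.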
Node* InlineTypeBaseNode::allocate_fields(GraphKit* kit) {
  InlineTypeBaseNode* vt = clone()->as_InlineTypeBase();
  for (uint i = 0; i < field_count(); i++) {
     InlineTypeNode* value = field_value(i)->isa_InlineType();
     if (field_is_flattened(i)) {
       // Flattened inline type field
       vt->set_field_value(i, value->allocate_fields(kit));
     } else if (value != NULL) {
       // Non-flattened inline type field
       vt->set_field_value(i, value->buffer(kit));
     }
  }
  vt = kit->gvn().transform(vt)->as_InlineTypeBase();
  kit->replace_in_map(this, vt);
  return vt;
}

Node* InlineTypeBaseNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (phase->C->scalarize_in_safepoints() && can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    make_scalar_in_safepoints(igvn);
    if (outcnt() == 0) {
      return NULL;
    }
  }
  Node* is_init = get_is_init();
  if (is_init->isa_InlineTypePtr()) {
    set_req(IsInit, is_init->as_InlineTypePtr()->get_is_init());
    return this;
  }
  Node* oop = get_oop();
  if (oop->isa_InlineTypePtr() && !phase->type(oop)->maybe_null()) {
    InlineTypePtrNode* vtptr = oop->as_InlineTypePtr();
    set_oop(vtptr->get_oop());
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }
  return NULL;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Create a new InlineTypeNode with uninitialized values and NULL oop
  Node* oop = vk->is_empty() ? default_oop(gvn, vk) : gvn.zerocon(T_INLINE_TYPE);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop);
  vt->set_is_init(gvn);
  return vt;
}

Node* InlineTypeBaseNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}

InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Create a new InlineTypeNode with default values
  InlineTypeNode* vt = new InlineTypeNode(vk, default_oop(gvn, vk));
  vt->set_is_init(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* field_type = vt->field_type(i);
    Node* value = gvn.zerocon(field_type->basic_type());
    if (field_type->is_inlinetype()) {
      ciInlineKlass* vk = field_type->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_default(gvn, vk);
      } else {
        value = InlineTypePtrNode::make_null(gvn, vk);
      }
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_default(&gvn), "must be the default inline type");
  return vt;
}

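// Creates the scalarized representation of a null value: a null oop, IsInit
// set to 0, and default/null values for all fields.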
InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT));
  vt->set_req(IsInit, gvn.intcon(0));
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* field_type = vt->field_type(i);
    Node* value = gvn.zerocon(field_type->basic_type());
    if (field_type->is_inlinetype()) {
      if (vt->field_is_null_free(i)) {
        value = InlineTypeNode::make_null(gvn, field_type->as_inline_klass());
      } else {
        value = InlineTypePtrNode::make_null(gvn, field_type->as_inline_klass());
      }
    }
    vt->set_field_value(i, value);
  }
  return gvn.transform(vt)->as_InlineType();
}

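// Returns true if all field values are known to be the default (all-zero) values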
bool InlineTypeBaseNode::is_default(PhaseGVN* gvn) const {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (value->is_InlineTypePtr()) {
      value = value->as_InlineTypePtr()->get_oop();
    }
    if (!gvn->type(value)->is_zero_type() &&
        !(field_is_null_free(i) && value->is_InlineType() && value->as_InlineType()->is_default(gvn))) {
      return false;
    }
  }
  return true;
}

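// Creates a scalarized inline type from 'oop' by loading the field values from
// memory. If the oop may be null, a null check is added and the null path is
// merged in (with the default value if 'null_free', otherwise with a scalarized
// null represented by a nullable InlineTypePtrNode).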
Node* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free) {
  PhaseGVN& gvn = kit->gvn();

  if (vk->is_empty()) {
    Node* def = make_default(gvn, vk);
    if (!null_free) {
      def = gvn.transform(new InlineTypePtrNode(def->as_InlineType(), false));
    }
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeBaseNode* vt = NULL;

  if (oop->uncast()->isa_InlineTypePtr()) {
    InlineTypePtrNode* vtptr = oop->uncast()->as_InlineTypePtr();
    if (!null_free) {
      return vtptr;
    }
    vt = new InlineTypeNode(vk, vtptr->get_oop());
    vt->set_is_init(gvn);
    for (uint i = Values; i < vtptr->req(); ++i) {
      vt->init_req(i, vtptr->in(i));
    }
    kit->record_for_igvn(vt);
    return gvn.transform(vt);
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default(gvn, vk);
      } else {
        vt = InlineTypePtrNode::make_null(gvn, vk);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    if (null_free) {
      vt = new InlineTypeNode(vk, not_null_oop);
    } else {
      vt = new InlineTypePtrNode(vk, not_null_oop);
    }
    vt->set_is_init(gvn);
    vt->load(kit, not_null_oop, not_null_oop, vk, /* holder_offset */ 0);

    if (null_ctl != kit->top()) {
      InlineTypeBaseNode* null_vt = NULL;
      if (null_free) {
        null_vt = make_default(gvn, vk);
      } else {
        null_vt = InlineTypePtrNode::make_null(gvn, vk);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);

      vt = vt->clone_with_phis(&gvn, region);
      vt->merge_with(&gvn, null_vt, 2, true);
      kit->set_control(gvn.transform(region));
    }
  } else {
    if (null_free) {
      vt = new InlineTypeNode(vk, oop);
    } else {
      vt = new InlineTypePtrNode(vk, oop);
    }
    // Oop can never be null
    Node* init_ctl = kit->control();
    vt->set_is_init(gvn);
    vt->load(kit, oop, oop, vk, /* holder_offset */ 0);
    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineTypePtr ||
           AllocateNode::Ideal_allocation(oop, &gvn) != NULL || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(!null_free || vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt);
}

// Creates an InlineTypeNode by loading the field values from a flattened representation in memory
InlineTypeNode* InlineTypeNode::make_from_flattened(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flattened_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flattened inline type field at 'holder_offset' or from an inline type array.
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  // The inline type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(kit, obj, ptr, holder, holder_offset, decorators);
  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

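// Creates an inline type from the scalarized inputs or outputs of 'multi'
// (a method entry or a call). 'base_input' is the index of the first field
// edge and is advanced past all consumed inputs.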
InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(oop);
  }
  vt->initialize_fields(kit, multi, base_input, in);
  return kit->gvn().transform(vt)->as_InlineType();
}

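// Creates a "larval" version of this inline type, i.e. a buffer that is still
// under construction and may still be written to (used, for example, during
// Unsafe-based construction). The larval state is reflected in the node's type
// and, if 'allocate' is set, in the allocation's _larval flag.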
InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = clone()->as_InlineType();
  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(alloc_oop);
  }
  res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

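// Finishes a larval inline type: clears the larval bit in the buffer's markWord
// and emits a store-store barrier so the now-immutable buffer can be safely
// published to other threads.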
InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj, &kit->gvn());
  assert(alloc != NULL, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = clone()->as_InlineType();
  res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

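// Checks if all field values of this inline type were loaded from the same base
// oop with offsets matching the inline type layout. If so, returns that base oop
// (which can then serve as the buffer oop of this inline type), otherwise NULL.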
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == NULL) {
    vk = inline_klass();
  }
  if (field_count() == 0) {
    assert(is_allocated(phase), "must be allocated");
    return get_oop();
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineTypeBase()) {
      InlineTypeBaseNode* vt = value->as_InlineTypeBase();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flattened(i) && vt->is_InlineType()) {
        // Check inline type field load recursively
        base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == NULL) {
          return NULL;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if the base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == NULL || (lbase != base && base != NULL) || loffset != offset) {
        return NULL;
      } else if (base == NULL) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == NULL || !vtptr->klass()->equals(vk)) {
          return NULL;
        }
      }
    } else {
      return NULL;
    }
  }
  return base;
}

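// Returns the klass pointer of 'vk' as a raw pointer constant with its least
// significant bit set. The tagged klass pointer is passed in place of an oop
// to identify the type of a scalarized inline type argument.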
Node* InlineTypeBaseNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.makecon(TypeRawPtr::make((address)bits));
}

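// Appends the field values of this inline type to node 'n' (a call or return),
// starting at edge 'base_input'. Flattened fields are passed recursively,
// non-flattened inline type fields are buffered first, and long/double values
// take an extra (top) input slot.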
void InlineTypeBaseNode::pass_fields(GraphKit* kit, Node* n, uint& base_input) {
  for (uint i = 0; i < field_count(); i++) {
    int offset = field_offset(i);
    ciType* type = field_type(i);
    Node* arg = field_value(i);

    if (field_is_flattened(i)) {
      // Flattened inline type field
      InlineTypeNode* vt = arg->as_InlineType();
      vt->pass_fields(kit, n, base_input);
    } else {
      if (arg->is_InlineType()) {
        // Non-flattened inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      BasicType bt = field_type(i)->basic_type();
      n->init_req(base_input++, arg);
      if (type2size[bt] == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
}

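// Counterpart of pass_fields: sets the field values of this inline type from
// the scalarized inputs or outputs of 'multi' (method parameters, call inputs,
// or call result projections), advancing 'base_input' accordingly.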
void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in) {
  PhaseGVN& gvn = kit->gvn();
  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    bool null_free = field_is_null_free(i);
    Node* parm = NULL;
    if (field_is_flattened(i)) {
      // Flattened inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in);
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flattened inline type field
      if (type->is_inlinetype()) {
        parm = make_from_oop(kit, parm, type->as_inline_klass(), null_free);
      }
      BasicType bt = type->basic_type();
      base_input += type2size[bt];
    }
    assert(parm != NULL, "should never be null");
    assert(field_value(i) == NULL, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != NULL) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (is_default(phase) && (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for default inline types
    set_oop(default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }

  if (!is_allocated(phase)) {
    // Save base oop if fields are loaded from memory and the inline
    // type is not buffered (in this case we should not use the oop).
    Node* base = is_loaded(phase);
    if (base != NULL && !phase->type(base)->maybe_null()) {
      set_oop(base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();

    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != NULL && alloc->in(AllocateNode::InlineTypeNode) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != NULL && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineTypeNode, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }
  return InlineTypeBaseNode::Ideal(phase, can_reshape);
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase) {
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != NULL && alloc->in(AllocateNode::InlineTypeNode) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == NULL || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      assert(!is_default(igvn) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != NULL && alloc_other->in(AllocateNode::InlineTypeNode) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != NULL && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }

  // Process users
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* out = fast_out(i);
    if (out->is_InlineType()) {
      // Recursively process inline type users
      igvn->rehash_node_delayed(out);
      out->as_InlineType()->remove_redundant_allocations(igvn, phase);
    } else if (out->isa_Allocate() != NULL) {
      // Unlink AllocateNode
      assert(out->in(AllocateNode::InlineTypeNode) == this, "should be linked");
      igvn->replace_input_of(out, AllocateNode::InlineTypeNode, igvn->C->top());
      --i; --imax;
    }
  }
}

InlineTypePtrNode* InlineTypePtrNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
  InlineTypePtrNode* ptr = new InlineTypePtrNode(vk, gvn.zerocon(T_OBJECT));
  ptr->set_req(IsInit, gvn.intcon(0));
  for (uint i = 0; i < ptr->field_count(); i++) {
    ciType* field_type = ptr->field_type(i);
    Node* value = gvn.zerocon(field_type->basic_type());
    if (field_type->is_inlinetype()) {
      if (ptr->field_is_null_free(i)) {
        value = InlineTypeNode::make_null(gvn, field_type->as_inline_klass());
      } else {
        value = InlineTypePtrNode::make_null(gvn, field_type->as_inline_klass());
      }
    }
    ptr->set_field_value(i, value);
  }
  return gvn.transform(ptr)->as_InlineTypePtr();
}

Node* InlineTypePtrNode::Identity(PhaseGVN* phase) {
  if (get_oop()->is_InlineTypePtr()) {
    return get_oop();
  }
  return this;
}

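// If the is_init input is a known non-zero constant, the inline type pointer
// is known to be non-null.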
const Type* InlineTypePtrNode::Value(PhaseGVN* phase) const {
  const Type* tinit = phase->type(in(IsInit));
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    return _type->join_speculative(TypePtr::NOTNULL);
  }
  return _type;
}