/*
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"

uint InlineTypeNode::size_of() const {
  return sizeof(*this);
}

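// The buffering state is included in the GVN hash and comparison so that value
// numbering does not common a buffered inline type with an unbuffered one.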
uint InlineTypeNode::hash() const {
  return TypeNode::hash() + _is_buffered;
}

bool InlineTypeNode::cmp(const Node& n) const {
  return TypeNode::cmp(n) && ((InlineTypeNode&)n)._is_buffered == _is_buffered;
}

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, bool is_init) {
  InlineTypeNode* vt = clone()->as_InlineType();

  // Use nullable type
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->set_type(t);

  // Create a PhiNode for merging the oop values
  const Type* phi_type = Type::get_const_type(inline_klass());
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), phi_type);
  gvn->set_type(oop, phi_type);
  gvn->record_for_igvn(oop);
  vt->set_oop(oop);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    phi_type = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), phi_type);
    gvn->set_type(is_init_node, phi_type);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    if (value->is_InlineType()) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region);
    } else {
      phi_type = Type::get_const_type(type);
      value = PhiNode::make(region, value, phi_type);
      gvn->set_type(value, phi_type);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->set_type(vt, vt->bottom_type());
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = 0; i < field_count(); ++i) {
      Node* n = field_value(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  _is_buffered = _is_buffered && other->_is_buffered;
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(gvn->transform(phi));
  }

  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(NULL);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(NULL);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flattened inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flattened inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset in the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != NULL, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flattened(index)) {
      // Flattened inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->first_field_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flattened(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flattened();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flattened() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

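// Scalarize this inline type in the given safepoint's debug info: the safepoint
// edge referring to this node is replaced by a SafePointScalarObjectNode and the
// is_init and field values are appended to the debug info, allowing deoptimization
// to re-materialize an equivalent heap object.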
void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  ciInlineKlass* vk = inline_klass();
  uint nfields = vk->nof_nonstatic_fields();
  JVMState* jvms = sfpt->jvms();
  // Replace safepoint edge by SafePointScalarObjectNode and add field values
  assert(jvms != NULL, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
#ifdef ASSERT
                                                                  NULL,
#endif
                                                                  first_ind, nfields);
  sobj->init_req(0, igvn->C->root());
  // Nullable inline types have an IsInit field that needs
  // to be checked before using the field values.
  if (!igvn->type(get_is_init())->is_int()->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  // Iterate over the inline type fields in order of increasing
  // offset and add the field values to the safepoint.
  for (uint j = 0; j < nfields; ++j) {
    int offset = vk->nonstatic_field_at(j)->offset();
    Node* value = field_value_by_offset(offset, true /* include flattened inline type fields */);
    if (value->is_InlineType()) {
      // Add inline type field to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
  }
  jvms->set_endoff(sfpt->req());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != NULL && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = allow_oop && is_allocated(igvn) &&
                 (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != NULL && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flattened fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->as_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->_worklist.push(this);
  }
}

const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = NULL;
  bool is_array = ary_type != NULL;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flattened inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != NULL, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = NULL;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the default instance.
      value = InlineTypeNode::make_default(kit->gvn(), ft->as_inline_klass());
    } else if (field_is_flattened(i)) {
      // Recursively load the flattened inline type field
      value = InlineTypeNode::make_from_flattened(kit, ft->as_inline_klass(), base, ptr, holder, offset, decorators);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != NULL);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != NULL, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != NULL, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
          null_free = true;
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        if (is_array) {
          decorators |= IS_ARRAY;
        }
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, decorators);
      }
      // Loading a non-flattened inline type from memory
      if (ft->is_inlinetype()) {
        value = InlineTypeNode::make_from_oop(kit, value, ft->as_inline_klass(), null_free);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flattened_accesses();
  }
  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  if (holder == NULL) {
    holder = inline_klass();
  }
  holder_offset -= inline_klass()->first_field_offset();
  store(kit, base, ptr, holder, holder_offset, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flattened(i)) {
      // Recursively store the flattened inline type field
      if (!value->is_InlineType()) {
        value = InlineTypeNode::make_from_oop(kit, value, ft->as_inline_klass());
      }
      value->as_InlineType()->store_flattened(kit, base, ptr, holder, offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      const TypeAryPtr* ary_type = kit->gvn().type(base)->isa_aryptr();
      if (ary_type != NULL) {
        decorators |= IS_ARRAY;
      }
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, decorators);
    }
  }
}

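// Returns a version of this inline type that is guaranteed to have a buffered
// (heap-allocated) representation. Three paths are merged below: the oop is
// already non-null (buffered), the value is null, or a new buffer is allocated
// and initialized with the field values.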
InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (_is_buffered) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone()->as_InlineType();
    vt->_is_buffered = true;
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  int bci = kit->bci();
  bool reexecute = kit->jvms()->should_reexecute();
  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");

    // Allocate and initialize buffer
    PreserveJVMState pjvms(kit);
    // Propagate re-execution state and bci
    kit->set_bci(bci);
    kit->jvms()->set_bci(bci);
    kit->jvms()->set_should_reexecute(reexecute);

    kit->kill_dead_locals();
    ciInlineKlass* vk = inline_klass();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true, this);
    store(kit, alloc_oop, alloc_oop, vk);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    assert(alloc != NULL, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

    region->init_req(3, kit->control());
    oop   ->init_req(3, alloc_oop);
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone()->as_InlineType();
  vt->_is_buffered = true;
  vt->set_oop(res_oop);
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (_is_buffered) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != NULL) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C, bool null_free) {
  ciInlineKlass* vk = inline_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    Node* field = NULL;
    if (con == TypeFunc::Parms) {
      field = get_oop();
    } else if (!null_free && con == (call->tf()->range_cc()->cnt() - 1)) {
      field = get_is_init();
    } else if (con > TypeFunc::Parms) {
      uint field_nb = con - (TypeFunc::Parms+1);
      int extra = 0;
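      // Long and double fields occupy two consecutive projection slots, so count
      // the preceding two-slot fields to map the projection number back to a field.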
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      field = field_value_by_offset(f->offset(), true);
    }
    if (field != NULL) {
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      --i; --imax;
    }
  }
}

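// Returns a transformed clone of this inline type in which every inline type
// field value is buffered; flattened fields are processed recursively.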
Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone()->as_InlineType();
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flattened(i)) {
      // Flattened inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flattened inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != NULL) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (is_default(phase) && inline_klass()->is_initialized() &&
      (!oop->is_Con() || phase->type(oop)->is_zero_type())) {
    // Use the pre-allocated oop for default inline types
    set_oop(default_oop(*phase, inline_klass()));
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(vtptr->get_oop());
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }
  if (!is_allocated(phase)) {
    // Save base oop if fields are loaded from memory and the inline
    // type is not buffered (in this case we should not use the oop).
    Node* base = is_loaded(phase);
    if (base != NULL && !phase->type(base)->maybe_null()) {
      set_oop(base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != NULL && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != NULL && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return NULL;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and NULL oop
  Node* oop = (vk->is_empty() && vk->is_initialized()) ? default_oop(gvn, vk) : gvn.zerocon(T_PRIMITIVE_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, null_free, vk->is_empty() && vk->is_initialized());
  vt->set_is_init(gvn);
  return vt;
}

Node* InlineTypeNode::default_oop(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Returns the constant oop of the default inline type allocation
  return gvn.makecon(TypeInstPtr::make(vk->default_instance()));
}

InlineTypeNode* InlineTypeNode::make_default(PhaseGVN& gvn, ciInlineKlass* vk) {
  // Create a new InlineTypeNode with default values
  Node* oop = vk->is_initialized() ? default_oop(gvn, vk) : gvn.zerocon(T_PRIMITIVE_OBJECT);
  InlineTypeNode* vt = new InlineTypeNode(vk, oop, true, vk->is_initialized());
  vt->set_is_init(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* field_type = vt->field_type(i);
    Node* value = gvn.zerocon(field_type->basic_type());
    if (field_type->is_inlinetype()) {
      ciInlineKlass* field_klass = field_type->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_default(gvn, field_klass);
      } else {
        value = InlineTypeNode::make_null(gvn, field_klass);
      }
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_default(&gvn), "must be the default inline type");
  return vt;
}

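// An inline type is the 'default' value iff it is known to be non-null and all
// of its field values are the default value of their respective field types.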
bool InlineTypeNode::is_default(PhaseGVN* gvn) const {
  const Type* tinit = gvn->type(in(IsInit));
  if (!tinit->isa_int() || !tinit->is_int()->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      if (!value->is_InlineType() || !value->as_InlineType()->is_default(gvn)) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      value = value->as_InlineType()->get_oop();
    }
    if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool null_free) {
  PhaseGVN& gvn = kit->gvn();

  if (vk->is_empty() && null_free) {
    InlineTypeNode* def = make_default(gvn, vk);
    kit->record_for_igvn(def);
    return def;
  }
  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = NULL;

  if (oop->isa_InlineType()) {
    return oop->as_InlineType();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      if (null_free) {
        vt = make_default(gvn, vk);
      } else {
        vt = InlineTypeNode::make_null(gvn, vk);
      }
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, null_free, true);
    vt->set_is_init(gvn);
    vt->load(kit, not_null_oop, not_null_oop, vk, /* holder_offset */ 0);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = NULL;
      if (null_free) {
        null_vt = make_default(gvn, vk);
      } else {
        null_vt = InlineTypeNode::make_null(gvn, vk);
      }
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);

      vt = vt->clone_with_phis(&gvn, region);
      vt->merge_with(&gvn, null_vt, 2, true);
      if (!null_free) {
        vt->set_oop(oop);
      }
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true, true);
    Node* init_ctl = kit->control();
    vt->set_is_init(gvn);
    vt->load(kit, oop, oop, vk, /* holder_offset */ 0);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_default(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != NULL || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(!null_free || vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

// Create an InlineTypeNode from a flattened inline type representation in memory
InlineTypeNode* InlineTypeNode::make_from_flattened(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flattened_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flattened inline type field at 'holder_offset' or from an inline type array.
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk);
  // The inline type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(kit, obj, ptr, holder, holder_offset, decorators);
  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(oop);
  }
  vt->initialize_fields(kit, multi, base_input, in, null_free);
  return kit->gvn().transform(vt)->as_InlineType();
}

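// Creates a 'larval' copy of this inline type: a buffered instance that is still
// mutable because the value object is under construction. finish_larval() below
// ends this phase again.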
InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = InlineTypeNode::make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(alloc_oop);
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

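// Ends the larval phase by clearing the larval bit in the buffer's mark word and
// emitting a StoreStore barrier so that other threads cannot observe the buffer
// in its mutable state.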
InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj, &kit->gvn());
  assert(alloc != NULL, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = InlineTypeNode::make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

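// Checks if all field values of this inline type are loaded from a single base
// oop matching the layout of 'vk'. Returns that base oop, or NULL if the fields
// do not originate from the same buffered instance.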
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == NULL) {
    vk = inline_klass();
  }
  if (field_count() == 0 && vk->is_initialized()) {
    const Type* tinit = phase->type(in(IsInit));
    if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
      assert(is_allocated(phase), "must be allocated");
      return get_oop();
    } else {
      // TODO 8284443
      return NULL;
    }
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flattened(i)) {
        // Check inline type field load recursively
        base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->first_field_offset());
        if (base == NULL) {
          return NULL;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == NULL || (lbase != base && base != NULL) || loffset != offset) {
        return NULL;
      } else if (base == NULL) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == NULL || !vtptr->instance_klass()->equals(vk)) {
          return NULL;
        }
      }
    } else {
      return NULL;
    }
  }
  return base;
}

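// The lowest bit of the klass pointer constant is set as a tag, which allows
// consumers to distinguish it from a regular (aligned) oop.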
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

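// Appends the scalarized field values of this inline type to 'n' (a call or a
// return), starting at 'base_input'. 'in' is true for incoming arguments and
// false for return values. The second slot of a long/double field is filled
// with top.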
void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flattened(i)) {
      // Flattened inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
    } else {
      if (arg->is_InlineType()) {
        // Non-flattened inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = NULL;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on it
    assert(null_check_region == NULL, "already set");
    if (is_init == NULL) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = NULL;
    if (field_is_flattened(i)) {
      // Flattened inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass());
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region);
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      // Non-flattened inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != NULL) {
          // Holder is nullable, set field to NULL if holder is NULL to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
        }
        parm = make_from_oop(kit, parm, type->as_inline_klass(), field_is_null_free(i));
      }
      base_input += type->size();
    }
    assert(parm != NULL, "should never be null");
    assert(field_value(i) == NULL, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != NULL && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == NULL || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      assert((!is_default(igvn) || !inline_klass()->is_initialized()) && !is_allocated(igvn), "re-allocation should be removed by Ideal transformation");
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != NULL && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != NULL && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
        igvn->_worklist.push(alloc);
      }
    }
  }
}

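// Creates a scalarized representation of the null value for 'vk': the oop input
// is the null constant, IsInit is 0, and all field values are zero or null.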
InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk) {
  InlineTypeNode* ptr = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false, true);
  ptr->set_req(IsInit, gvn.intcon(0));
  for (uint i = 0; i < ptr->field_count(); i++) {
    ciType* field_type = ptr->field_type(i);
    Node* value = gvn.zerocon(field_type->basic_type());
    if (field_type->is_inlinetype()) {
      value = InlineTypeNode::make_null(gvn, field_type->as_inline_klass());
    }
    ptr->set_field_value(i, value);
  }
  return gvn.transform(ptr)->as_InlineType();
}

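// An InlineTypeNode whose oop input is itself an InlineTypeNode is redundant and
// can be replaced by that input.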
Node* InlineTypeNode::Identity(PhaseGVN* phase) {
  if (get_oop()->is_InlineType()) {
    return get_oop();
  }
  return this;
}

const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
  const Type* tinit = phase->type(in(IsInit));
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}