/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/phaseX.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_init) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node* value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
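    // Illustrative example (hypothetical, not from this code base): for a value class
    //   value class Node { Node next; int payload; }
    // the non-flat 'next' field has the same type as its holder, so one InlineTypeNode for 'Node'
    // may carry a scalarized InlineTypeNode for 'next' while another one, created at a different
    // scalarization depth, only carries the buffered oop. Creating a Phi for such a field could
    // merge those incompatible representations.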
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < field_count(); ++i) {
      Node* n = in(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 = field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // Find the declared field which contains the field we are looking for
  int index = inline_klass()->field_index_by_offset(offset);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");

  if (!recursive || !field_is_flat(index)) {
    assert(offset == field_offset(index), "offset mismatch");
    return value;
  }

  // Flat inline type field
  InlineTypeNode* vt = value->as_InlineType();
  if (offset == field_null_marker_offset(index)) {
    return vt->get_is_init();
  } else {
    int sub_offset = offset - field_offset(index);      // Offset of the flattened field inside the declared field
    sub_offset += vt->inline_klass()->payload_offset(); // Add header size
    return vt->field_value_by_offset(sub_offset, recursive);
  }
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

bool InlineTypeNode::field_is_volatile(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_volatile();
}

int InlineTypeNode::field_null_marker_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(field->is_flat(), "must be an inline type");
  return field->null_marker_offset();
}
uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, Node_List& null_markers, SafePointNode* sfpt) {
  uint cnt = 0;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      cnt += vt->add_fields_to_safepoint(worklist, null_markers, sfpt);
      if (!field_is_null_free(i)) {
        null_markers.push(vt->get_is_init());
        cnt++;
      }
      continue;
    }
    if (value->is_InlineType()) {
      // Add inline type to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
    cnt++;
  }
  return cnt;
}

void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  JVMState* jvms = sfpt->jvms();
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());

  // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have an IsInit field that
  // needs to be checked before using the field values.
  const TypeInt* tinit = igvn->type(get_is_init())->isa_int();
  if (tinit != nullptr && !tinit->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  Node_List null_markers;
  uint nfields = add_fields_to_safepoint(worklist, null_markers, sfpt);
  // Add null markers after the field values
  for (uint i = 0; i < null_markers.size(); ++i) {
    sfpt->add_req(null_markers.at(i));
  }
  jvms->set_endoff(sfpt->req());
  // Replace safepoint edge by SafePointScalarObjectNode
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = nullptr;
  bool is_array = ary_type != nullptr;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flat inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != nullptr, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the all-zero instance.
      value = make_all_zero_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      int nm_offset = null_free ? -1 : (holder_offset + field_null_marker_offset(i));
      value = make_from_flat_impl(kit, ft->as_inline_klass(), base, ptr, nullptr, holder, offset, /* atomic */ false, nm_offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != nullptr);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && oop_ptr->is_inlinetypeptr() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != nullptr, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != nullptr, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        if (null_free) {
          val_type = val_type->join_speculative(TypePtr::NOTNULL);
        }
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

// Get a field value from the payload by shifting it according to the offset
static Node* get_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, BasicType val_bt, int offset) {
  // Shift to the right position in the long value
  assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
  Node* value = nullptr;
  Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
  if (bt == T_LONG) {
    value = gvn->transform(new URShiftLNode(payload, shift_val));
    value = gvn->transform(new ConvL2INode(value));
  } else {
    value = gvn->transform(new URShiftINode(payload, shift_val));
  }

  if (val_bt == T_INT || val_bt == T_OBJECT || val_bt == T_ARRAY) {
    return value;
  } else {
    // Make sure to zero unused bits in the 32-bit value
    return Compile::narrow_value(val_bt, value, nullptr, gvn, true);
  }
}
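// Worked example for the payload helpers (illustrative values, not a layout taken from the VM):
// for a nullable value class with a byte field at payload offset 0, a short field at payload
// offset 2 and a null marker byte at offset 4, the T_LONG payload holds the byte in bits [0,7],
// the short in bits [16,31] and the null marker in bits [32,39] because each value is shifted by
// offset * 8 (offset << LogBitsPerByte) in get_payload_value/set_payload_value.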
// Convert a payload value to field values
void InlineTypeNode::convert_from_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset) {
  PhaseGVN* gvn = &kit->gvn();
  Node* value = nullptr;
  if (!null_free) {
    // Get the null marker
    value = get_payload_value(gvn, payload, bt, T_BOOLEAN, null_marker_offset);
    set_req(IsInit, value);
  }
  // Iterate over the fields and get their values from the payload
  for (uint i = 0; i < field_count(); ++i) {
    ciType* ft = field_type(i);
    bool field_null_free = field_is_null_free(i);
    int offset = holder_offset + field_offset(i) - inline_klass()->payload_offset();
    if (field_is_flat(i)) {
      null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
      InlineTypeNode* vt = make_uninitialized(*gvn, ft->as_inline_klass(), field_null_free);
      vt->convert_from_payload(kit, bt, payload, offset, field_null_free, null_marker_offset);
      value = gvn->transform(vt);
    } else {
      value = get_payload_value(gvn, payload, bt, ft->basic_type(), offset);
      if (!ft->is_primitive_type()) {
        // Narrow oop field
        assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
        const Type* val_type = Type::get_const_type(ft);
        if (field_null_free) {
          val_type = val_type->join_speculative(TypePtr::NOTNULL);
        }
        value = gvn->transform(new CastI2NNode(kit->control(), value));
        value = gvn->transform(new DecodeNNode(value, val_type->make_narrowoop()));
        value = gvn->transform(new CastPPNode(kit->control(), value, val_type, ConstraintCastNode::UnconditionalDependency));

        // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
        // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
        // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
        // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.

        if (ft->is_inlinetype()) {
          GrowableArray<ciType*> visited;
          value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        }
      }
    }
    set_field_value(i, value);
  }
}

// Set a field value in the payload by shifting it according to the offset
static Node* set_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, Node* value, BasicType val_bt, int offset) {
  assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");

  // Make sure to zero unused bits in the 32-bit value
  if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
    value = gvn->transform(new AndINode(value, gvn->intcon(0xFF)));
  } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
    value = gvn->transform(new AndINode(value, gvn->intcon(0xFFFF)));
  } else if (val_bt == T_FLOAT) {
    value = gvn->transform(new MoveF2INode(value));
  } else {
    assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
  }

  Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
  if (bt == T_LONG) {
    // Convert to long and remove the sign bit (the backend will fold this and emit a zero extend i2l)
    value = gvn->transform(new ConvI2LNode(value));
    value = gvn->transform(new AndLNode(value, gvn->longcon(0xFFFFFFFF)));

    Node* shift_value = gvn->transform(new LShiftLNode(value, shift_val));
    payload = new OrLNode(shift_value, payload);
  } else {
    Node* shift_value = gvn->transform(new LShiftINode(value, shift_val));
    payload = new OrINode(shift_value, payload);
  }
  return gvn->transform(payload);
}
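// Note on the 'oop_off_1'/'oop_off_2' out parameters used below: 'convert_to_payload' records the
// payload-relative offsets of up to two narrow oop fields in them so that 'store_flat' can decide
// whether the payload can be written with a plain atomic store or needs a StoreLSpecialNode with
// late GC barrier expansion.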
// Convert the field values to a payload value of type 'bt'
Node* InlineTypeNode::convert_to_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset, int& oop_off_1, int& oop_off_2) const {
  PhaseGVN* gvn = &kit->gvn();
  Node* value = nullptr;
  if (!null_free) {
    // Set the null marker
    value = get_is_init();
    payload = set_payload_value(gvn, payload, bt, value, T_BOOLEAN, null_marker_offset);
  }
  // Iterate over the fields and add their values to the payload
  for (uint i = 0; i < field_count(); ++i) {
    value = field_value(i);
    int inner_offset = field_offset(i) - inline_klass()->payload_offset();
    int offset = holder_offset + inner_offset;
    if (field_is_flat(i)) {
      null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
      payload = value->as_InlineType()->convert_to_payload(kit, bt, payload, offset, field_is_null_free(i), null_marker_offset, oop_off_1, oop_off_2);
    } else {
      ciType* ft = field_type(i);
      BasicType field_bt = ft->basic_type();
      if (!ft->is_primitive_type()) {
        // Narrow oop field
        assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
        assert(inner_offset != -1, "sanity");
        if (oop_off_1 == -1) {
          oop_off_1 = inner_offset;
        } else {
          assert(oop_off_2 == -1, "already set");
          oop_off_2 = inner_offset;
        }
        const Type* val_type = Type::get_const_type(ft)->make_narrowoop();
        if (value->is_InlineType()) {
          PreserveReexecuteState preexecs(kit);
          kit->jvms()->set_should_reexecute(true);
          value = value->as_InlineType()->buffer(kit, false);
        }
        value = gvn->transform(new EncodePNode(value, val_type));
        value = gvn->transform(new CastP2XNode(kit->control(), value));
        value = gvn->transform(new ConvL2INode(value));
        field_bt = T_INT;
      }
      payload = set_payload_value(gvn, payload, bt, value, field_bt, offset);
    }
  }
  return payload;
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset, bool atomic, int null_marker_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  ciInlineKlass* vk = inline_klass();
  bool null_free = (null_marker_offset == -1);

  if (atomic) {
    bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
#ifdef ASSERT
    bool is_naturally_atomic = (!is_array && vk->is_empty()) || (null_free && vk->nof_declared_nonstatic_fields() == 1);
    assert(!is_naturally_atomic, "No atomic access required");
#endif
    // Convert to a payload value <= 64-bit and write atomically.
    // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
    // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
    // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
    // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
    BasicType bt = vk->atomic_size_to_basic_type(null_free);
    Node* payload = (bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
    int oop_off_1 = -1;
    int oop_off_2 = -1;
    payload = convert_to_payload(kit, bt, payload, 0, null_free, null_marker_offset - holder_offset, oop_off_1, oop_off_2);

    if (!UseG1GC || oop_off_1 == -1) {
      // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
      assert(oop_off_2 == -1 || !UseG1GC, "sanity");
      // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
      assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
      const Type* val_type = Type::get_const_basic_type(bt);

      if (!is_array) {
        Node* adr = kit->basic_plus_adr(base, ptr, holder_offset);
        kit->insert_mem_bar(Op_MemBarCPUOrder);
        kit->access_store_at(base, adr, TypeRawPtr::BOTTOM, payload, val_type, bt, decorators | C2_MISMATCHED | (is_array ? IS_ARRAY : 0), true, this);
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      } else {
        assert(holder_offset == 0, "sanity");

        RegionNode* region = new RegionNode(3);
        kit->gvn().set_type(region, Type::CONTROL);
        kit->record_for_igvn(region);

        Node* bol = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
        IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

        Node* input_memory_state = kit->reset_memory();
        kit->set_all_memory(input_memory_state);

        Node* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
        kit->gvn().set_type(mem, Type::MEMORY);
        kit->record_for_igvn(mem);

        PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
        kit->gvn().set_type(io, Type::ABIO);
        kit->record_for_igvn(io);

        // Nullable
        kit->set_control(kit->IfFalse(iff));
        if (!kit->stopped()) {
          assert(!null_free && vk->has_nullable_atomic_layout(), "Flat array can't be nullable");
          kit->insert_mem_bar(Op_MemBarCPUOrder);
          kit->access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, val_type, bt, decorators | C2_MISMATCHED | (is_array ? IS_ARRAY : 0), true, this);
          kit->insert_mem_bar(Op_MemBarCPUOrder);
          mem->init_req(1, kit->reset_memory());
          io->init_req(1, kit->i_o());
        }
        region->init_req(1, kit->control());

        // Null-free
        kit->set_control(kit->IfTrue(iff));
        if (!kit->stopped()) {
          kit->set_all_memory(input_memory_state);

          // Check if it's atomic
          RegionNode* region_null_free = new RegionNode(3);
          kit->gvn().set_type(region_null_free, Type::CONTROL);
          kit->record_for_igvn(region_null_free);

          Node* mem_null_free = PhiNode::make(region_null_free, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
          kit->gvn().set_type(mem_null_free, Type::MEMORY);
          kit->record_for_igvn(mem_null_free);

          PhiNode* io_null_free = PhiNode::make(region_null_free, kit->i_o(), Type::ABIO);
          kit->gvn().set_type(io_null_free, Type::ABIO);
          kit->record_for_igvn(io_null_free);

          Node* bol = kit->null_free_atomic_array_test(base, vk);
          IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

          // Atomic
          kit->set_control(kit->IfTrue(iff));
          if (!kit->stopped()) {
            BasicType bt_null_free = vk->atomic_size_to_basic_type(/* null_free */ true);
            const Type* val_type_null_free = Type::get_const_basic_type(bt_null_free);
            kit->set_all_memory(input_memory_state);

            if (bt == T_LONG && bt_null_free != T_LONG) {
              payload = kit->gvn().transform(new ConvL2INode(payload));
            }

            Node* cast = base;
            Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ true);
            kit->insert_mem_bar(Op_MemBarCPUOrder);
            kit->access_store_at(cast, adr, TypeRawPtr::BOTTOM, payload, val_type_null_free, bt_null_free, decorators | C2_MISMATCHED | (is_array ? IS_ARRAY : 0), true, this);
            kit->insert_mem_bar(Op_MemBarCPUOrder);
            mem_null_free->init_req(1, kit->reset_memory());
            io_null_free->init_req(1, kit->i_o());
          }
          region_null_free->init_req(1, kit->control());

          // Non-Atomic
          kit->set_control(kit->IfFalse(iff));
          if (!kit->stopped()) {
            kit->set_all_memory(input_memory_state);

            Node* cast = base;
            Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ false);
            store(kit, cast, adr, holder, holder_offset - vk->payload_offset(), -1, decorators);
            mem_null_free->init_req(2, kit->reset_memory());
            io_null_free->init_req(2, kit->i_o());
          }
          region_null_free->init_req(2, kit->control());

          mem->init_req(2, kit->gvn().transform(mem_null_free));
          io->init_req(2, kit->gvn().transform(io_null_free));
          region->init_req(2, kit->gvn().transform(region_null_free));
        }

        kit->set_control(kit->gvn().transform(region));
        kit->set_all_memory(kit->gvn().transform(mem));
        kit->set_i_o(kit->gvn().transform(io));
      }
    } else {
      if (oop_off_2 == -1 && UseCompressedOops && vk->nof_declared_nonstatic_fields() == 1) {
        // TODO 8350865 Implement this
        // If null free, it's not a long but an int store. Deoptimize for now.
        BuildCutout unless(kit, kit->null_free_array_test(base, /* null_free = */ false), PROB_MAX);
        kit->uncommon_trap_exact(Deoptimization::Reason_unhandled, Deoptimization::Action_none);
      }

      // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
      assert(UseG1GC, "Unexpected GC");
      assert(bt == T_LONG, "Unexpected payload type");
      // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
      Node* oop_offset = (oop_off_2 == -1) ? kit->intcon(oop_off_1) : nullptr;
      Node* adr = kit->basic_plus_adr(base, ptr, holder_offset);
      kit->insert_mem_bar(Op_MemBarCPUOrder);
      Node* mem = kit->reset_memory();
      kit->set_all_memory(mem);
      Node* st = kit->gvn().transform(new StoreLSpecialNode(kit->control(), mem, adr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
      kit->set_memory(st, TypeRawPtr::BOTTOM);
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }
    return;
  }

  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  holder_offset -= vk->payload_offset();

  if (!null_free) {
    bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
    Node* adr = kit->basic_plus_adr(base, ptr, null_marker_offset);
    kit->access_store_at(base, adr, TypeRawPtr::BOTTOM, get_is_init(), TypeInt::BOOL, T_BOOLEAN, is_array ? (decorators | IS_ARRAY) : decorators);
  }
  store(kit, base, ptr, holder, holder_offset, -1, decorators);
}
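// Writes the field values of this inline type to memory relative to 'ptr'. If 'offsetOnly' is not
// -1, only the field declared at exactly that offset is written.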
void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, int offsetOnly, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    if (offsetOnly != -1 && offsetOnly != field_offset(i)) continue;
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      int nm_offset = field_is_null_free(i) ? -1 : (holder_offset + field_null_marker_offset(i));
      value->as_InlineType()->store_flat(kit, base, ptr, nullptr, holder, offset, /* atomic */ false, nm_offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    // Allocate and initialize buffer, re-execute on deoptimization.
    kit->jvms()->set_bci(kit->bci());
    kit->jvms()->set_should_reexecute(true);
    kit->kill_dead_locals();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
    store(kit, alloc_oop, alloc_oop, vk);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    assert(alloc != nullptr, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    oop->init_req(3, alloc_oop);
    region->init_req(3, kit->control());
    io ->init_req(3, kit->i_o());
    mem->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
  ProjNode* pn = call->proj_out_or_null(proj_idx);
  if (pn != nullptr) {
    C->gvn_replace_by(pn, value);
    C->initial_gvn()->hash_delete(pn);
    pn->set_req(0, C->top());
  }
  proj_idx += type2size[bt];
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  uint proj_idx = TypeFunc::Parms;
  // Replace oop projection
  replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
  // Replace field projections
  replace_field_projs(C, call, proj_idx);
  // Replace is_init projection
  replace_proj(C, call, proj_idx, get_is_init(), T_BOOLEAN);
  assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
}

void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      // Replace field projections for flat field
      vt->replace_field_projs(C, call, proj_idx);
      if (!field_is_null_free(i)) {
        // Replace is_init projection for nullable field
        replace_proj(C, call, proj_idx, vt->get_is_init(), T_BOOLEAN);
      }
      continue;
    }
    // Replace projection for field value
    replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
  }
}

Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flat inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  Node* is_buffered = get_is_buffered();

  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use base oop if fields are loaded from memory, don't do so if base is the CheckCastPP of an
  // allocation because the only case we load from a naked CheckCastPP is when we exit a
  // constructor of an inline type and we want to relinquish the larval oop there. This has a
  // couple of benefits:
  // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
  // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge
  //   when we try to clone a value object.
  // - The buffering, if needed, is delayed until it is required. This new allocation, since it is
  //   created from an InlineTypeNode, is recognized as not having a unique identity and in the
  //   future, we can move them around more freely such as hoisting out of loops. This is not true
  //   for the old allocation since larval value objects do have unique identities.
  Node* base = is_loaded(phase);
  if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) {
    if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
      set_oop(*phase, base);
      set_is_buffered(*phase);
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
  vt->set_is_buffered(gvn, false);
  vt->set_is_init(gvn);
  return vt;
}

InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_all_zero_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  // Create a new InlineTypeNode initialized with all zero
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
  vt->set_is_buffered(gvn, false);
  vt->set_is_init(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_all_zero_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
  return vt;
}

bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
  const TypeInt* tinit = gvn->type(get_is_init())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the all-zero value. If 'flat' is set,
      // reject non-flat fields because they need to be initialized with an oop to a buffer.
      if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_is_init())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    } else if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();

  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    return oop->as_InlineType();
  }

  if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      vt = make_null_impl(gvn, vk, visited);
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->load(kit, not_null_oop, not_null_oop, vk, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      vt->set_oop(gvn, oop);
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->load(kit, oop, oop, vk, visited);
    // TODO 8284443
    // assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
    //        AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset,
                                               bool atomic, int null_marker_offset, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, obj, ptr, idx, holder, holder_offset, atomic, null_marker_offset, decorators, visited);
}
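// Note: a 'null_marker_offset' of -1 means the flat field or array element is null-free and
// therefore has no null marker to read (the same convention is used by 'store_flat' above).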
// GraphKit wrapper for the 'make_from_flat' method
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset,
                                                    bool atomic, int null_marker_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flat inline type field at 'holder_offset' or from an inline type array.
  bool null_free = (null_marker_offset == -1);
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);

  bool is_array = (kit->gvn().type(obj)->isa_aryptr() != nullptr);
  if (atomic) {
    // Read atomically and convert from payload
#ifdef ASSERT
    bool is_naturally_atomic = (!is_array && vk->is_empty()) || (null_free && vk->nof_declared_nonstatic_fields() == 1);
    assert(!is_naturally_atomic, "No atomic access required");
#endif
    BasicType bt = vk->atomic_size_to_basic_type(null_free);
    decorators |= C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD;
    const Type* val_type = Type::get_const_basic_type(bt);

    Node* payload = nullptr;
    if (!is_array) {
      Node* adr = kit->basic_plus_adr(obj, ptr, holder_offset);
      payload = kit->access_load_at(obj, adr, TypeRawPtr::BOTTOM, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
    } else {
      assert(holder_offset == 0, "sanity");

      RegionNode* region = new RegionNode(3);
      kit->gvn().set_type(region, Type::CONTROL);
      kit->record_for_igvn(region);

      payload = PhiNode::make(region, nullptr, val_type);
      kit->gvn().set_type(payload, val_type);
      kit->record_for_igvn(payload);

      Node* input_memory_state = kit->reset_memory();
      kit->set_all_memory(input_memory_state);

      Node* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
      kit->gvn().set_type(mem, Type::MEMORY);
      kit->record_for_igvn(mem);

      PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
      kit->gvn().set_type(io, Type::ABIO);
      kit->record_for_igvn(io);

      Node* bol = kit->null_free_array_test(obj); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
      IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

      // Nullable
      kit->set_control(kit->IfFalse(iff));
      if (!kit->stopped()) {
        assert(!null_free && vk->has_nullable_atomic_layout(), "Flat array can't be nullable");

        Node* cast = obj;
        Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ false, /* not_null_free */ true, /* atomic */ true);
        Node* load = kit->access_load_at(cast, adr, TypeRawPtr::BOTTOM, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
        payload->init_req(1, load);
        mem->init_req(1, kit->reset_memory());
        io->init_req(1, kit->i_o());
      }
      region->init_req(1, kit->control());

      // Null-free
      kit->set_control(kit->IfTrue(iff));
      if (!kit->stopped()) {
        kit->set_all_memory(input_memory_state);

        // Check if it's atomic
        RegionNode* region_null_free = new RegionNode(3);
        kit->gvn().set_type(region_null_free, Type::CONTROL);
        kit->record_for_igvn(region_null_free);

        Node* payload_null_free = PhiNode::make(region_null_free, nullptr, val_type);
        kit->gvn().set_type(payload_null_free, val_type);
        kit->record_for_igvn(payload_null_free);

        Node* mem_null_free = PhiNode::make(region_null_free, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
        kit->gvn().set_type(mem_null_free, Type::MEMORY);
        kit->record_for_igvn(mem_null_free);

        PhiNode* io_null_free = PhiNode::make(region_null_free, kit->i_o(), Type::ABIO);
        kit->gvn().set_type(io_null_free, Type::ABIO);
        kit->record_for_igvn(io_null_free);

        bol = kit->null_free_atomic_array_test(obj, vk);
        IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

        // Atomic
        kit->set_control(kit->IfTrue(iff));
        if (!kit->stopped()) {
          BasicType bt_null_free = vk->atomic_size_to_basic_type(/* null_free */ true);
          const Type* val_type_null_free = Type::get_const_basic_type(bt_null_free);
          kit->set_all_memory(input_memory_state);

          Node* cast = obj;
          Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ true);
          Node* load = kit->access_load_at(cast, adr, TypeRawPtr::BOTTOM, val_type_null_free, bt_null_free, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
          if (bt == T_LONG && bt_null_free != T_LONG) {
            load = kit->gvn().transform(new ConvI2LNode(load));
          }
          // Set the null marker if not known to be null-free
          if (!null_free) {
            load = set_payload_value(&kit->gvn(), load, bt, kit->intcon(1), T_BOOLEAN, null_marker_offset);
          }
          payload_null_free->init_req(1, load);
          mem_null_free->init_req(1, kit->reset_memory());
          io_null_free->init_req(1, kit->i_o());
        }
        region_null_free->init_req(1, kit->control());

        // Non-Atomic
        kit->set_control(kit->IfFalse(iff));
        if (!kit->stopped()) {
          // TODO 8350865 Is the conversion to/from payload folded? We should wire this directly.
          // Also remove the PreserveReexecuteState in Parse::array_load when buffering is no longer possible.
          kit->set_all_memory(input_memory_state);

          InlineTypeNode* vt_atomic = make_uninitialized(kit->gvn(), vk, true);
          Node* cast = obj;
          Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ false);
          vt_atomic->load(kit, cast, adr, holder, visited, holder_offset - vk->payload_offset(), decorators);
          Node* tmp_payload = (bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
          int oop_off_1 = -1;
          int oop_off_2 = -1;
          tmp_payload = vt_atomic->convert_to_payload(kit, bt, tmp_payload, 0, null_free, null_marker_offset, oop_off_1, oop_off_2);

          payload_null_free->init_req(2, tmp_payload);
          mem_null_free->init_req(2, kit->reset_memory());
          io_null_free->init_req(2, kit->i_o());
        }
        region_null_free->init_req(2, kit->control());

        region->init_req(2, kit->gvn().transform(region_null_free));
        payload->init_req(2, kit->gvn().transform(payload_null_free));
        mem->init_req(2, kit->gvn().transform(mem_null_free));
        io->init_req(2, kit->gvn().transform(io_null_free));
      }

      kit->set_control(kit->gvn().transform(region));
      kit->set_all_memory(kit->gvn().transform(mem));
      kit->set_i_o(kit->gvn().transform(io));
    }

    vt->convert_from_payload(kit, bt, kit->gvn().transform(payload), 0, null_free, null_marker_offset - holder_offset);
    return kit->gvn().transform(vt)->as_InlineType();
  }

  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->payload_offset();

  if (!null_free) {
    Node* adr = kit->basic_plus_adr(obj, ptr, null_marker_offset);
    Node* nm_value = kit->access_load_at(obj, adr, TypeRawPtr::BOTTOM, TypeInt::BOOL, T_BOOLEAN, is_array ? (decorators | IS_ARRAY) : decorators);
    vt->set_req(IsInit, nm_value);
  }
  vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);

  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
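    // For a scalarized return, the projections off 'multi' are expected to appear in order: the
    // (possibly already buffered) oop first, then the individual field values, and, for nullable
    // inline types, the IsInit value last (all wired up in initialize_fields below).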
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(kit->gvn(), oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == nullptr) {
    vk = inline_klass();
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flat(i) && vt->is_InlineType()) {
        // Check inline type field load recursively
        base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
        if (base == nullptr) {
          return nullptr;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
        return nullptr;
      } else if (base == nullptr) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
          return nullptr;
        }
      }
    } else {
      return nullptr;
    }
  }
  return base;
}

Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
      if (!field_is_null_free(i)) {
        assert(field_null_marker_offset(i) != -1, "inconsistency");
        n->init_req(base_input++, arg->as_InlineType()->get_is_init());
      }
    } else {
      if (arg->is_InlineType()) {
        // Non-flat inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
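  // A top node merely reserves the slot so that the edge count stays consistent with the calling convention.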
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = nullptr;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on the null check
    assert(null_check_region == nullptr, "already set");
    if (is_init == nullptr) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      is_init->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = nullptr;
    if (field_is_flat(i)) {
      // Flat inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
      if (!field_is_null_free(i)) {
        assert(field_null_marker_offset(i) != -1, "inconsistency");
        Node* is_init = nullptr;
        if (multi->is_Start()) {
          is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
        } else if (in) {
          is_init = multi->as_Call()->in(base_input);
        } else {
          is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
        }
        vt->set_req(IsInit, is_init);
        base_input++;
      }
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      bool null_free = field_is_null_free(i);
      // Non-flat inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != nullptr) {
          // We limit scalarization for inline types with circular fields and can therefore observe nodes
          // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
          // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
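          // If circular inline types are involved, fall back to the buffered oop so that the Phi created
          // below merges plain oops rather than InlineTypeNodes of potentially different scalarization depth.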
          if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
            parm = parm->as_InlineType()->get_oop();
          }
          // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
          null_free = false;
        }
        if (visited.contains(type)) {
          kit->C->set_has_circular_inline_type(true);
        } else if (!parm->is_InlineType()) {
          int old_len = visited.length();
          visited.push(type);
          if (null_free) {
            parm = kit->cast_not_null(parm);
          }
          parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
          visited.trunc_to(old_len);
        }
      }
      base_input += type->size();
    }
    assert(parm != nullptr, "should never be null");
    assert(field_value(i) == nullptr, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    gvn.record_for_igvn(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them with dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == nullptr || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace the allocation with the dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
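        // Re-enqueue the dominated allocation so that IGVN revisits it now that its result has been replaced.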
        igvn->_worklist.push(alloc);
      }
    }
  }
}

InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_null_impl(gvn, vk, visited, transform);
}

InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
  vt->set_is_buffered(gvn);
  vt->set_is_init(gvn, false);
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      value = make_null_impl(gvn, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  return transform ? gvn.transform(vt)->as_InlineType() : vt;
}

InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
  if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
    return clone()->as_InlineType();
  }
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    if (fast_out(i) != map) {
      return clone()->as_InlineType();
    }
  }
  gvn->hash_delete(this);
  return this;
}

const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
  const Type* tinit = phase->type(in(IsInit));
  if (tinit == Type::TOP) {
    return Type::TOP;
  }
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}