1 /* 2 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

// Parse a getfield/putfield/getstatic/putstatic bytecode.
//   is_get   - true for a load (get*), false for a store (put*)
//   is_field - true for an instance access (*field), false for a static (*static)
// Resolves the field, performs sanity/linkage checks (emitting uncommon traps
// where the interpreter must take over), null-checks the receiver for instance
// accesses, and delegates the actual access to do_get_xxx()/do_put_xxx().
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  // Fast path: reading a field of a scalarized inline type receiver. The
  // field value is already available as a sub-node of the InlineTypeNode,
  // so no memory access is needed.
  if (is_get && is_field && field_holder->is_inlinetype() && peek()->is_InlineType()) {
    InlineTypeNode* vt = peek()->as_InlineType();
    null_check(vt);
    Node* value = vt->field_value_by_offset(field->offset_in_bytes());
    if (value->is_InlineType()) {
      value = value->as_InlineType()->adjust_scalarization_depth(this);
    }
    pop();
    push_node(field->layout_type(), value);
    return;
  }

  // Static/instance mismatch between bytecode and resolved field.
  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_constructor())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  nullptr, "put to call site target field");
    return;
  }

  // Emit a class-initialization barrier if the holder's <clinit> may not
  // have run yet.
  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped()) return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    // For a put, the value (1 or 2 stack slots) sits above the receiver.
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped()) return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop();  // pop receiver after putting
    }
  } else {
    // Static access: the "object" is the holder class's java mirror constant.
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

// Emit IR for a field load from 'obj' and push the result onto the Java
// stack. Constant-folds final/stable fields where possible, handles empty
// and flat inline type fields specially, and otherwise emits a (possibly
// barriered) memory load with an appropriately sharpened result type.
void Parse::do_get_xxx(Node* obj, ciField* field) {
  BasicType bt = field->layout_type();
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() && !field->is_flat() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != nullptr) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  int offset = field->offset_in_bytes();
  bool must_assert_null = false;

  Node* ld = nullptr;
  if (field->is_null_free() && field_klass->as_inline_klass()->is_empty()) {
    // Loading from a field of an empty inline type. Just return the default instance.
    ld = InlineTypeNode::make_default(_gvn, field_klass->as_inline_klass());
  } else if (field->is_flat()) {
    // Loading from a flat inline type field.
    ld = InlineTypeNode::make_from_flat(this, field_klass->as_inline_klass(), obj, obj, field->holder(), offset);
  } else {
    // Build the resultant type of the load
    const Type* type;
    if (is_reference_type(bt)) {
      if (!field_klass->is_loaded()) {
        // Unloaded field type: speculate null (see comment above) and
        // remember to assert nullness of the loaded value below.
        type = TypeInstPtr::BOTTOM;
        must_assert_null = true;
      } else if (field->is_static_constant()) {
        // This can happen if the constant oop is non-perm.
        ciObject* con = field->constant_value().as_object();
        // Do not "join" in the previous type; it doesn't add value,
        // and may yield a vacuous result if the field is of interface type.
        if (con->is_null_object()) {
          type = TypePtr::NULL_PTR;
        } else {
          type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
        }
        assert(type != nullptr, "field singleton type must be consistent");
      } else {
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        if (field->is_null_free() && field->is_static()) {
          // Check if static inline type field is already initialized
          ciInstance* mirror = field->holder()->java_mirror();
          ciObject* val = mirror->field_value(field).as_object();
          if (!val->is_null_object()) {
            // Field already holds a value, so the load can never see null.
            type = type->join_speculative(TypePtr::NOTNULL);
          }
        }
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    // Volatile loads require sequentially-consistent memory ordering.
    decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (field_klass->is_inlinetype()) {
      // Load a non-flattened inline type from memory
      ld = InlineTypeNode::make_from_oop(this, ld, field_klass->as_inline_klass(), field->is_null_free());
    }
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != nullptr) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field_klass));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

// Emit IR for a field store to 'obj'. The value to store was pushed by the
// bytecode and is popped here; for instance fields the receiver is left on
// the stack and popped by do_field_access() after this returns. Handles
// scalarized inline type receivers, null-free and flat fields, and records
// final/stable/volatile writes for barrier insertion at method exit.
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  int offset = field->offset_in_bytes();
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  if (obj->is_InlineType()) {
    // Receiver is a scalarized inline type (asserted larval, i.e. still
    // under construction): instead of emitting a memory store, clone the
    // InlineTypeNode with the updated field value and swap it into the map.
    // TODO 8325106 Factor into own method
    // TODO 8325106 Assert that we only do this in the constructor and align with checks in ::do_call
    //if (_method->is_object_constructor() && _method->holder()->is_inlinetype()) {
    assert(obj->as_InlineType()->is_larval(), "must be larval");

    // TODO 8325106 Assert that holder is null-free
    /*
    int holder_depth = field->type()->size();
    null_check(peek(holder_depth));
    if (stopped()) {
      return;
    }
    */

    if (field->is_null_free()) {
      // Null-free field: reject a null value. Restore the consumed stack
      // slots and set the reexecute bit so the interpreter retries the
      // bytecode if the null check deoptimizes.
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      int nargs = 1 + field->type()->size();
      inc_sp(nargs);
      val = null_check(val);
      if (stopped()) {
        return;
      }
    }
    if (!val->is_InlineType() && field->type()->is_inlinetype()) {
      // Scalarize inline type field value
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass(), field->is_null_free());
    } else if (val->is_InlineType() && !field->is_flat()) {
      // Field value needs to be allocated because it can be merged with an oop.
      // Re-execute if buffering triggers deoptimization.
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      int nargs = 1 + field->type()->size();
      inc_sp(nargs);
      val = val->as_InlineType()->buffer(this);
    }

    // Clone the inline type node and set the new field value
    InlineTypeNode* new_vt = obj->as_InlineType()->clone_if_required(&_gvn, _map);
    new_vt->set_field_value_by_offset(field->offset_in_bytes(), val);
    {
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      int nargs = 1 + field->type()->size();
      inc_sp(nargs);
      new_vt = new_vt->adjust_scalarization_depth(this);
    }

    // TODO 8325106 Double check and explain these checks
    if ((!_caller->has_method() || C->inlining_incrementally() || _caller->method()->is_object_constructor()) && new_vt->is_allocated(&gvn())) {
      assert(new_vt->as_InlineType()->is_allocated(&gvn()), "must be buffered");
      // We need to store to the buffer
      // TODO 8325106 looks like G1BarrierSetC2::g1_can_remove_pre_barrier is not strong enough to remove the pre barrier
      // TODO is it really guaranteed that the preval is null?
      new_vt->store(this, new_vt->get_oop(), new_vt->get_oop(), new_vt->bottom_type()->inline_klass(), 0, C2_TIGHTLY_COUPLED_ALLOC | IN_HEAP | MO_UNORDERED, field->offset_in_bytes());

      // Preserve allocation ptr to create precedent edge to it in membar
      // generated on exit from constructor.
      AllocateNode* alloc = AllocateNode::Ideal_allocation(new_vt->get_oop());
      if (alloc != nullptr) {
        set_alloc_with_final(new_vt->get_oop());
      }
      set_wrote_final(true);
    }

    // Make the updated inline type visible to the rest of the parsed method.
    replace_in_map(obj, _gvn.transform(new_vt));
    return;
  }

  if (field->is_null_free()) {
    // Null-free field: reject a null value before emitting the store.
    PreserveReexecuteState preexecs(this);
    inc_sp(1);
    jvms()->set_should_reexecute(true);
    val = null_check(val);
  }
  if (field->is_null_free() && field->type()->as_inline_klass()->is_empty()) {
    // Storing to a field of an empty inline type. Ignore.
    return;
  } else if (field->is_flat()) {
    // Storing to a flat inline type field.
    if (!val->is_InlineType()) {
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass());
    }
    // NOTE(review): the inc_sp/dec_sp pair keeps the receiver slot on the
    // expression stack across the store, presumably so a safepoint/deopt
    // during the store sees a consistent JVM state -- confirm.
    inc_sp(1);
    val->as_InlineType()->store_flat(this, obj, obj, field->holder(), offset);
    dec_sp(1);
  } else {
    // Store the value.
    const Type* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      if (is_reference_type(bt)) {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      } else {
        field_type = Type::BOTTOM;
      }
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    // Volatile stores require sequentially-consistent memory ordering.
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    inc_sp(1);
    access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
    dec_sp(1);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj) != nullptr) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        // Can't bind stable with its allocation, only record allocation for final field.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================

// Parse an anewarray bytecode: allocate a one-dimensional object array.
// The element klass comes from the constant pool; the length is popped
// from the Java stack and the resulting array oop is pushed.
void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when class that array contains is not loaded
  // we need the loaded class for the rest of graph; do not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);

  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != nullptr &&
             array_klass->element_klass()->is_inlinetype() &&
             !array_klass->element_klass()->as_inline_klass()->is_initialized()) {
    // Inline element class not initialized yet: deoptimize so the
    // interpreter triggers initialization before the array is created.
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


// Parse a newarray bytecode: allocate a one-dimensional primitive array
// of the given element type. The length is popped from the Java stack
// and the resulting array oop is pushed.
void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
// Recursively expand a multianewarray with constant outer dimensions into
// a sequence of one-dimensional allocations plus element stores.
//   lengths     - dimension length nodes, outermost first
//   ndimensions - number of remaining dimensions
//   nargs       - stack slots to restore if an allocation deoptimizes
// Returns the (sub-)array allocated for lengths[0].
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != nullptr, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)), length, nargs);
  if (ndimensions > 1) {
    // Only constant-length outer dimensions are ever expanded.
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      // Allocate each sub-array and store it into element slot i.
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      access_store_at(array, eaddr, adr_type, elem, elemtype, T_OBJECT, IN_HEAP | IS_ARRAY);
    }
  }
  return array;
}

// Parse a multianewarray bytecode: pop the dimension lengths, then either
// expand inline into nested 1-d allocations (when the outer dimensions are
// small constants) or call into the runtime.
void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = nullptr;  // terminating null for make_runtime_call
  int j;
  // Pop the lengths while walking down to the ultimate element class.
  ciKlass* elem_klass = array_klass;
  for (j = ndimensions-1; j >= 0; j--) {
    length[j] = pop();
    elem_klass = elem_klass->as_array_klass()->element_klass();
  }
  if (elem_klass != nullptr && elem_klass->is_inlinetype() && !elem_klass->as_inline_klass()->is_initialized()) {
    // Uninitialized inline element class: restore the popped lengths and
    // deoptimize so the interpreter triggers initialization.
    inc_sp(ndimensions);
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int64_t expand_count = 1;        // count of allocations in the expansion
  int64_t expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    // To prevent overflow, we use 64-bit values.  Alternatively,
    // we could clamp dim_con like so:
    // dim_con = MIN2(dim_con, expand_limit);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    // Give up on expansion for a non-constant, zero/negative, or too-large
    // dimension, or when the total allocation count exceeds the limit.
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = nullptr;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }

  // Slow path: call into the runtime. Pick the fixed-arity stub for 2..5
  // dimensions; more dimensions fall through to the generic N-dim stub.
  address fun = nullptr;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;  // handled by the expansion above
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = nullptr;

  if (fun != nullptr) {
    // Unused trailing length arguments are nullptr; length[] itself is
    // null-terminated at index ndimensions (see above).
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, nullptr, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : nullptr,
                          (ndimensions > 3) ? length[4] : nullptr);
  } else {
    // Create a java array for dimension sizes
    Node* dims = nullptr;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill-in it with values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), nullptr, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)),
                          dims);
  }
  // The runtime call can throw; wire up the exception path.
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass, Type::trust_interfaces);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != nullptr)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}