/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "ci/ciObjArray.hpp"
#include "asm/register.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/growableArray.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != nullptr)  jvms->map()->set_next_exception(nullptr);
  set_jvms(jvms);
#ifdef ASSERT
  if (_gvn.is_IterGVN() != nullptr) {
    assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
    // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
    _worklist_size = _gvn.C->igvn_worklist()->size();
  }
#endif
}

// Private constructor for parser.
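// Leaves _map unset and poisons _sp/_bci in debug builds; the Parse
// subclass is expected to install a real map before emitting any code.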
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = nullptr;
  set_map(nullptr);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map      = this->map();
  JVMState*      jvms     = this->jvms();
  int            stk_size = jvms->stk_size();
  int            stkoff   = jvms->stkoff();
  Node*          top      = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}


//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());       // Record the new bci in the JVMState
  jvms->set_sp(sp());         // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state.  This version
// uses the reexecute_sp for reexecuting bytecodes.
JVMState* GraphKit::sync_jvms_for_reexecute() {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());          // Record the new bci in the JVMState
  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == nullptr) {
    if (bci() != jvms()->bci())     return false;
    if (sp() != (int)jvms()->sp())  return false;
    return true;
  }
  if (jvms()->method() != parse->method())  return false;
  if (jvms()->bci()    != parse->bci())     return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp != parse->sp())               return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth != parse->depth())         return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node,
// rather than the usual self-reference.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
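// (See combine_exception_states and use_exception_state below, which
// create and consume these specially marked regions.)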
static bool is_hidden_merge(Node* reg) {
  if (reg == nullptr)  return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == nullptr)  return false;
  }
  return reg->is_Region() && reg->in(0) != nullptr && reg->in(0)->is_Root();
}

void GraphKit::verify_map() const {
  if (map() == nullptr)  return;  // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(), "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == nullptr, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to null, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != nullptr) {
    dead_map->disconnect_inputs(C); // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is null, or control is top.
bool GraphKit::stopped() {
  if (map() == nullptr)         return true;
  else if (control() == top())  return true;
  else                          return false;
}


//-----------------------------has_exception_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_exception_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != nullptr; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
  ex_map->add_req(ex_oop);
  debug_only(verify_exception_state(ex_map));
}

inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
  assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
  Node* ex_oop = ex_map->in(ex_map->req()-1);
  if (clear_it)  ex_map->del_req(ex_map->req()-1);
  return ex_oop;
}

//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, false);
}

//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, true);
}

#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Tell whether a previously saved exception is present in the map.
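// The saved ex_oop, when present, is the single extra input appended
// just past the map's normal JVMS edges (see set_saved_ex_oop above).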
bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
  return ex_map->req() == ex_map->jvms()->endoff()+1;
}
#endif

//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
  sync_jvms();
  SafePointNode* ex_map = stop();  // do not manipulate this map any more
  set_saved_ex_oop(ex_map, ex_oop);
  return ex_map;
}


//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
void GraphKit::add_exception_state(SafePointNode* ex_map) {
  if (ex_map == nullptr || ex_map->control() == top()) {
    return;
  }
#ifdef ASSERT
  verify_exception_state(ex_map);
  if (has_exceptions()) {
    assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
  }
#endif

  // If there is already an exception of exactly this type, merge with it.
  // In particular, null-checks and other low-level exceptions common up here.
  Node*       ex_oop  = saved_ex_oop(ex_map);
  const Type* ex_type = _gvn.type(ex_oop);
  if (ex_oop == top()) {
    // No action needed.
    return;
  }
  assert(ex_type->isa_instptr(), "exception must be an instance");
  for (SafePointNode* e2 = _exceptions; e2 != nullptr; e2 = e2->next_exception()) {
    const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
    // We check sp also because call bytecodes can generate exceptions
    // both before and after arguments are popped!
    if (ex_type2 == ex_type
        && e2->_jvms->sp() == ex_map->_jvms->sp()) {
      combine_exception_states(ex_map, e2);
      return;
    }
  }

  // No pre-existing exception of the same type.  Chain it on the list.
  push_exception_state(ex_map);
}

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != nullptr) {
    jvms->map()->set_next_exception(nullptr);
    for (SafePointNode* next_map; ex_map != nullptr; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(nullptr);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == nullptr) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(nullptr);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, nullptr);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff())  map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
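    // The scratch map only carries the exception list; killing its control
    // below guarantees it can never be mistaken for executable state.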
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = nullptr;   // done with this set of exceptions
  return jvms;
}

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having the root node as its control input, rather than the usual
// self-reference.  Such regions do not appear except in this function,
// and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing())  return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node*         hidden_merge_mark = root();
  Node*         region  = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // if there is special marking on ex_map also, we add multiple edges from src
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // how wide was the destination phi_map, originally?
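  // Edges appended beyond this width are the new ones contributed by ex_map.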
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos)  i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req())  add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet_speculative(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
  phi_map->merge_replaced_nodes_with(ex_map);
}

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states.  Process the phis now.
    region->set_req(0, region);  // now it's an ordinary region
    set_jvms(phi_map->jvms());   // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int       bci    = this->bci();
  if (method != nullptr && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                          bool must_throw) {
  // if the exception capability is set, then we will generate code
  // to check the JavaThread.should_post_on_exceptions flag to see
  // if we actually need to report exception events (for this
  // thread).  If we don't need to report exception events, we will
  // take the normal fast path provided by add_exception_events.  If
  // exception event reporting is enabled for this thread, we will
  // take the uncommon_trap in the BuildCutout below.

  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
  Node* jthread = _gvn.transform(new ThreadLocalNode());
  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

  // Test the should_post_on_exceptions_flag vs. 0
  Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
  Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );

  // Branch to slow_path if should_post_on_exceptions_flag was true
  { BuildCutout unless(this, tst, PROB_MAX);
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
    uncommon_trap(reason, Deoptimization::Action_none,
                  (ciKlass*)nullptr, (char*)nullptr, must_throw);
  }

}

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason) {
  bool must_throw = true;

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
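  // (A throw counts as "hot" when profiling shows this trap reason has
  // already fired here; see the too_many_traps checks below.)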
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_exception_handler()) {
      treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole.  If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note:   If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot && method()->can_omit_stack_trace()) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace.  This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis.  Leave that for the future.
    ciInstance* ex_obj = nullptr;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      ex_obj = env()->ClassCastException_instance();
      break;
    case Deoptimization::Reason_array_check:
      ex_obj = env()->ArrayStoreException_instance();
      break;
    default:
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != nullptr) {
      if (env()->jvmti_can_post_on_exceptions()) {
        // check if we must post exception events, take uncommon trap if so
        uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
        // here if should_post_on_exceptions is false
        // continue on with the normal codegen
      }

      // Cheat with a preallocated exception object.
      if (C->log() != nullptr)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
      Node*              ex_node = _gvn.transform(ConNode::make(ex_con));

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
      Node *store = access_store_at(ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);

      if (!method()->has_exception_handlers()) {
        // We don't need to preserve the stack if there's no handler as the entire frame is going to be popped anyway.
        // This prevents issues with exception handling and late inlining.
        set_sp(0);
        clean_stack(0);
      }

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case:  Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : nullptr;
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), m)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != nullptr)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers.  This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)nullptr, (char*)nullptr, must_throw);
}


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit    = kit;
  _map    = kit->map();   // preserve the map
  _sp     = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : nullptr);
#ifdef ASSERT
  _bci    = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == nullptr || parser->block() == nullptr) ? -1 : parser->block()->rpo();
  _block  = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == nullptr || parser->block() == nullptr) ? -1 : parser->block()->rpo();
  assert(block == _block, "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;   // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}

//---------------------------PreserveReexecuteState----------------------------
PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
  assert(!kit->stopped(), "must call stopped() before");
  _kit       = kit;
  _sp        = kit->sp();
  _reexecute = kit->jvms()->_reexecute;
}
PreserveReexecuteState::~PreserveReexecuteState() {
  if (_kit->stopped())  return;
  _kit->jvms()->_reexecute = _reexecute;
  _kit->set_sp(_sp);
}

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == nullptr)  return nullptr;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}

//-----------------------------destruct_map_clone------------------------------
//
// Order of destruct is important to increase the likelihood that memory can be re-used. We need
// to destruct/free/delete in the exact opposite order as clone_map().
void GraphKit::destruct_map_clone(SafePointNode* sfp) {
  if (sfp == nullptr)  return;

  Node* mem = sfp->memory();
  JVMState* jvms = sfp->jvms();

  if (jvms != nullptr) {
    delete jvms;
  }

  remove_for_igvn(sfp);
  gvn().clear_type(sfp);
  sfp->destruct(&_gvn);

  if (mem != nullptr) {
    gvn().clear_type(mem);
    mem->destruct(&_gvn);
  }
}

//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(nullptr);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals.  The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci).  We return the entry
  // liveness for the method.

  if (method() == nullptr || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == nullptr || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0)  continue;     // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int bci = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci();  // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing that certain bytecodes reexecute if deoptimization happens.
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int       cur_bci    = jvms->bci();
  if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && (code == Bytecodes::_multianewarray));
    // Reexecute _multianewarray bytecode which was replaced with
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: interpreter should not have it set since this optimization
    // is limited by dimensions and guarded by a flag so in some cases
    // multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}

// Helper function for adding JVMState and debug information to a node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  // If the bytecode needs to be reexecuted we need to put
  // the arguments back on the stack.
  const bool should_reexecute = jvms()->should_reexecute();
  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();

  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
  // undefined if the bci is different.  This is normal for Parse but it
  // should not happen for LibraryCallKit because only one bci is processed.
  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
         "in LibraryCallKit the reexecute bit should not change");

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (env()->should_retain_local_variables()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms); // Start jvms list for call node

  // For a known set of bytecodes, the interpreter should reexecute them if
  // deoptimization happens. We set the reexecute state for them here
  if (out_jvms->is_reexecute_undefined() &&  //don't change if already specified
      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
#ifdef ASSERT
    int inputs = 0, not_used;  // initialized by GraphKit::compute_stack_effects()
    assert(method() == youngest_jvms->method(), "sanity");
    assert(compute_stack_effects(inputs, not_used), "unknown bytecode: %s", Bytecodes::name(java_bc()));
    assert(out_jvms->sp() >= (uint)inputs, "not enough operands for reexecution");
#endif // ASSERT
    out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //       [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.

  JVMState* callee_jvms = nullptr;
  for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++) {
        Node* val = in_map->in(k + j);
        // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
        if (val->is_InlineType() && val->isa_InlineType()->is_larval() && callee_jvms != nullptr &&
            callee_jvms->method()->is_object_constructor() && callee_jvms->method()->holder()->is_inlinetype() && val == in_map->argument(in_jvms, 0)) {
          val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
        }
        call->set_req(p++, val);
      }
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++) {
        Node* val = in_map->in(k + j);
        // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
        if (val->is_InlineType() && val->isa_InlineType()->is_larval() && callee_jvms != nullptr &&
            callee_jvms->method()->is_object_constructor() && callee_jvms->method()->holder()->is_inlinetype() && val == in_map->argument(in_jvms, 0)) {
          val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
        }
        call->set_req(p++, val);
      }
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
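    // (These describe scalar-replaced objects, which deoptimization uses
    // to rematerialize the eliminated allocations.)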
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    callee_jvms = out_jvms;
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
  }

  auto rsize = [&]() {
    assert(code != Bytecodes::_illegal, "code is illegal!");
    BasicType rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
    return (rtype < T_CONFLICT) ? type2size[rtype] : 0;
  };

  switch (code) {
  case Bytecodes::_illegal:
    return false;

  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    inputs = 0;
    break;

  case Bytecodes::_dup:         inputs = 1;  break;
  case Bytecodes::_dup_x1:      inputs = 2;  break;
  case Bytecodes::_dup_x2:      inputs = 3;  break;
  case Bytecodes::_dup2:        inputs = 2;  break;
  case Bytecodes::_dup2_x1:     inputs = 3;  break;
  case Bytecodes::_dup2_x2:     inputs = 4;  break;
  case Bytecodes::_swap:        inputs = 2;  break;
  case Bytecodes::_arraylength: inputs = 1;  break;

  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    {
      bool ignored_will_link;
      ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
      int      size  = field->type()->size();
      bool is_get = (depth >= 0), is_static = (depth & 1);
      inputs = (is_static ? 0 : 1);
      if (is_get) {
        depth = size - inputs;
      } else {
        inputs += size;        // putxxx pops the value from the stack
        depth = - inputs;
      }
    }
    break;

  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokeinterface:
    {
      bool ignored_will_link;
      ciSignature* declared_signature = nullptr;
      ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      assert(declared_signature != nullptr, "cannot be null");
      inputs = declared_signature->arg_size_for_bc(code);
      int size = declared_signature->return_type()->size();
      depth = size - inputs;
    }
    break;

  case Bytecodes::_multianewarray:
    {
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      inputs = iter.get_dimensions();
      assert(rsize() == 1, "");
      depth = 1 - inputs;
    }
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_lreturn:
  case Bytecodes::_freturn:
  case Bytecodes::_dreturn:
  case Bytecodes::_areturn:
    assert(rsize() == -depth, "");
    inputs = -depth;
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    inputs = 0;
    depth  = 1;                  // S.B. depth=1, not zero
    break;

  default:
    // bytecode produces a typed result
    inputs = rsize() - depth;
    assert(inputs >= 0, "");
    break;
  }

#ifdef ASSERT
  // spot check
  int outputs = depth + inputs;
  assert(outputs >= 0, "sanity");
  switch (code) {
  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  default: break;
  }
#endif //ASSERT

  return true;
}



//------------------------------basic_plus_adr---------------------------------
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  // short-circuit a common case
  if (offset == intcon(0))  return ptr;
  return _gvn.transform( new AddPNode(base, ptr, offset) );
}

Node* GraphKit::ConvI2L(Node* offset) {
  // short-circuit a common case
  jint offset_con = find_int_con(offset, Type::OffsetBot);
  if (offset_con != Type::OffsetBot) {
    return longcon((jlong) offset_con);
  }
  return _gvn.transform( new ConvI2LNode(offset));
}

Node* GraphKit::ConvI2UL(Node* offset) {
  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
  if (offset_con != (juint) Type::OffsetBot) {
    return longcon((julong) offset_con);
  }
  Node* conv = _gvn.transform( new ConvI2LNode(offset));
  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
  return _gvn.transform( new AndLNode(conv, mask) );
}

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != nullptr)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
  Node *alen;
  if (alloc == nullptr) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
  }
  return alen;
}

Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
                                   const TypeOopPtr* oop_type,
                                   bool replace_length_in_map) {
  Node* length = alloc->Ideal_length();
  if (replace_length_in_map == false || map()->find_edge(length) >= 0) {
    Node* ccast = alloc->make_ideal_length(oop_type, &_gvn);
    if (ccast != length) {
      // do not transform ccast here, it might convert to top node for
      // negative array length and break assumptions in parsing stage.
      _gvn.set_type_bottom(ccast);
      record_for_igvn(ccast);
      if (replace_length_in_map) {
        replace_in_map(length, ccast);
      }
      return ccast;
    }
  }
  return length;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a null pointer check.  Returned value is
// the incoming address with null casted away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control,
                                  bool speculative,
                                  bool is_init_check) {
  assert(!assert_null || null_control == nullptr, "not both at once");
  if (stopped())  return top();
  NOT_PRODUCT(explicit_null_checks_inserted++);

  if (value->is_InlineType()) {
    // Null checking a scalarized but nullable inline type. Check the IsInit
    // input instead of the oop input to avoid keeping buffer allocations alive.
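    // Look through chains of buffered InlineTypeNodes first, so we test the
    // IsInit input of the innermost value.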
    InlineTypeNode* vtptr = value->as_InlineType();
    while (vtptr->get_oop()->is_InlineType()) {
      vtptr = vtptr->get_oop()->as_InlineType();
    }
    null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
    if (stopped()) {
      return top();
    }
    if (assert_null) {
      // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
      // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
      // replace_in_map(value, vtptr);
      // return vtptr;
      return null();
    }
    bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
    return cast_not_null(value, do_replace_in_map);
  }

  // Construct null check
  Node *chk = nullptr;
  switch(type) {
    case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != nullptr && !tp->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == nullptr) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded.  However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        // will always be followed by a nonsense operation, so we
        // can just issue the uncommon trap here.
        // Our access to the unloaded class will only be correct
        // after it has been loaded and initialized, which requires
        // a trip through the interpreter.
        ciKlass* klass = tp->unloaded_klass();
#ifndef PRODUCT
        if (WizardMode) { tty->print("Null check of unloaded "); klass->print(); tty->cr(); }
#endif
        uncommon_trap(Deoptimization::Reason_unloaded,
                      Deoptimization::Action_reinterpret,
                      klass, "!loaded");
        return top();
      }

      if (assert_null) {
        // See if the type is contained in NULL_PTR.
        // If so, then the value is already null.
        if (t->higher_equal(TypePtr::NULL_PTR)) {
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;           // Elided null assert quickly!
        }
      } else {
        // See if mixing in the null pointer changes type.
        // If so, then the null pointer was not allowed in the original
        // type.  In other words, "value" was not-null.
        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;           // Elided null check quickly!
        }
      }
      chk = new CmpPNode( value, null() );
      break;
    }

    default:
      fatal("unexpected type: %s", type2name(type));
  }
  assert(chk != nullptr, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new BoolNode( chk, btest);
  Node   *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {       // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct an identical
        // CastPP (and hence hash to) as already exists for the prior test.
        // Return that casted value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();  // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == nullptr)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null) {
    reason = Deoptimization::reason_null_assert(speculative);
  } else if (type == T_OBJECT || is_init_check) {
    reason = Deoptimization::reason_null_check(speculative);
  } else {
    reason = Deoptimization::Reason_div0_check;
  }
  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != nullptr || too_many_traps(reason)) {
    // probability is less likely
    ok_prob =  PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != nullptr &&
             (method()->method_data()->trap_count(reason)
              >= (uint)ImplicitNullCheckThreshold)) {
    ok_prob =  PROB_LIKELY_MAG(3);
  }

  if (null_control != nullptr) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
    Node* null_true = _gvn.transform( new IfFalseNode(iff));
    set_control(      _gvn.transform( new IfTrueNode(iff)));
#ifndef PRODUCT
    if (null_true == top()) {
      explicit_null_checks_elided++;
    }
#endif
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
    // Check for optimizer eliding test at parse time
    if (stopped()) {
      // Failure not possible; do not bother making uncommon trap.
      NOT_PRODUCT(explicit_null_checks_elided++);
    } else if (assert_null) {
      uncommon_trap(reason,
                    Deoptimization::Action_make_not_entrant,
                    nullptr, "assert_null");
    } else {
      replace_in_map(value, zerocon(type));
      builtin_throw(reason);
    }
  }

  // Must throw exception, fall-thru not possible?
  if (stopped()) {
    return top();               // No result
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == nullptr || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}

//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  if (obj->is_InlineType()) {
    // TODO 8325106 Can we avoid cloning?
    Node* vt = obj->clone();
    vt->as_InlineType()->set_is_init(_gvn);
    vt = _gvn.transform(vt);
    if (do_replace_in_map) {
      replace_in_map(obj, vt);
    }
    return vt;
  }
  const Type* t = _gvn.type(obj);
  const Type* t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if (t == t_not_null)  return obj;

  Node* cast = new CastPPNode(control(), obj, t_not_null);
  cast = _gvn.transform(cast);

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;                  // Return casted value
}

// Sometimes in intrinsics, we implicitly know an object is not null
// (there's no actual null check) so we can cast it to not null. In
// the course of optimizations, the input to the cast can become null.
// In that case that data path will die and we need the control path
// to become dead as well to keep the graph consistent. So we have to
// add a check for null for which one branch can't be taken. It uses
// an Opaque4 node that will cause the check to be removed after loop
// opts so the test goes away and the compiled code doesn't execute a
// useless check.
Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
  if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(value))) {
    return value;
  }
  Node* chk = _gvn.transform(new CmpPNode(value, null()));
  Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
  Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
  IfNode* iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(iff, iff->Value(&_gvn));
  Node* if_f = _gvn.transform(new IfFalseNode(iff));
  Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
  Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic"));
  C->root()->add_req(halt);
  Node* if_t = _gvn.transform(new IfTrueNode(iff));
  set_control(if_t);
  return cast_not_null(value, do_replace_in_map);
}


//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
  if (old == neww) {
    return;
  }

  map()->replace_edge(old, neww);

  // Note: This operation potentially replaces any edge
  // on the map.  This includes locals, stack, and monitors
  // of the current (innermost) JVM state.
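  // (Explanatory note: the (old, new) pairs recorded below via
  // record_replaced_node are what later allow an inlinee's improved values,
  // e.g. not-null casts, to be applied back to the caller's graph when the
  // inlining is committed; see replace_call() and ReplacedNodes::apply().)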

  // don't let inconsistent types from profiling escape this
  // method

  const Type* told = _gvn.type(old);
  const Type* tnew = _gvn.type(neww);

  if (!tnew->higher_equal(told)) {
    return;
  }

  map()->record_replaced_node(old, neww);
}


//=============================================================================
//--------------------------------memory---------------------------------------
Node* GraphKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  assert(p != mem->empty_memory(), "empty");
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}

//-----------------------------reset_memory------------------------------------
Node* GraphKit::reset_memory() {
  Node* mem = map()->memory();
  // do not use this node for any more parsing!
  debug_only( map()->set_memory((Node*)nullptr) );
  return _gvn.transform( mem );
}

//------------------------------set_all_memory---------------------------------
void GraphKit::set_all_memory(Node* newmem) {
  Node* mergemem = MergeMemNode::make(newmem);
  gvn().set_type_bottom(mergemem);
  map()->set_memory(mergemem);
}

//------------------------------set_all_memory_call----------------------------
void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
  Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
  set_all_memory(newmem);
}

//=============================================================================
//
// parser factory methods for MemNodes
//
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          int adr_idx,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched,
                          bool unsafe,
                          uint8_t barrier_data) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory");
  const TypePtr* adr_type = nullptr; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
  ld = _gvn.transform(ld);

  if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
    if (ld->is_DecodeN()) {
      // Also record the actual load (LoadN) in case ld is DecodeN
      assert(ld->in(1)->Opcode() == Op_LoadN, "Assumption invalid: input to DecodeN is not LoadN");
      record_for_igvn(ld->in(1));
    }
  }
  return ld;
}

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                                int adr_idx,
                                MemNode::MemOrd mo,
                                bool require_atomic_access,
                                bool unaligned,
                                bool mismatched,
                                bool unsafe,
                                int barrier_data) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  const TypePtr* adr_type = nullptr;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
  if (unaligned) {
    st->as_Store()->set_unaligned_access();
  }
  if (mismatched) {
    st->as_Store()->set_mismatched_access();
  }
  if (unsafe) {
    st->as_Store()->set_unsafe_access();
  }
  st->as_Store()->set_barrier_data(barrier_data);
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove intermediate store with DU info
  // so push on worklist for optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}

Node* GraphKit::access_store_at(Node* obj,
                                Node* adr,
                                const TypePtr* adr_type,
                                Node* val,
                                const Type* val_type,
                                BasicType bt,
                                DecoratorSet decorators,
                                bool safe_for_replace) {
  // Transformation of a value which could be null pointer (CastPP #null)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute transformation here to avoid barrier generation in such case.
  if (_gvn.type(val) == TypePtr::NULL_PTR) {
    val = _gvn.makecon(TypePtr::NULL_PTR);
  }

  if (stopped()) {
    return top(); // Dead path ?
  }

  assert(val != nullptr, "not dead path");
  if (val->is_InlineType()) {
    // Store to non-flat field. Buffer the inline type and make sure
    // the store is re-executed if the allocation triggers deoptimization.
    PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);
    val = val->as_InlineType()->buffer(this, safe_for_replace);
  }

  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, val_type);
  C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::store_at(access, value);
  } else {
    return _barrier_set->store_at(access, value);
  }
}
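// Note on the is_raw() dispatch used by the access_* helpers (an explanatory
// sketch, not normative): raw accesses, as selected by the decorators, are
// routed non-virtually to the BarrierSetC2 base implementation and so bypass
// GC-specific expansion, while the virtual call lets the active collector's
// barrier set wrap the memory operation with the pre/post barriers it needs.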
Node* GraphKit::access_load_at(Node* obj,   // containing obj
                               Node* adr,   // actual address to load val from
                               const TypePtr* adr_type,
                               const Type* val_type,
                               BasicType bt,
                               DecoratorSet decorators,
                               Node* ctl) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr_type);
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}

Node* GraphKit::access_load(Node* adr,   // actual address to load val at
                            const Type* val_type,
                            BasicType bt,
                            DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}

Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj,
                                             Node* adr,
                                             const TypePtr* adr_type,
                                             int alias_idx,
                                             Node* expected_val,
                                             Node* new_val,
                                             const Type* value_type,
                                             BasicType bt,
                                             DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                             bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
  } else {
    return _barrier_set->atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* obj,
                                              Node* adr,
                                              const TypePtr* adr_type,
                                              int alias_idx,
                                              Node* expected_val,
                                              Node* new_val,
                                              const Type* value_type,
                                              BasicType bt,
                                              DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                             bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
  } else {
    return _barrier_set->atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_xchg_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                             bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_xchg_at(access, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_add_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_add_at(access, new_val, value_type);
  }
}

void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
  return _barrier_set->clone(this, src, dst, size, is_array);
}
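// Usage sketch for the access API above (illustrative only; actual decorator
// sets, types, and offsets vary by call site):
//
//   Node* adr = basic_plus_adr(obj, obj, field_offset);
//   access_store_at(obj, adr, adr_type, val, val_type, T_OBJECT,
//                   IN_HEAP | MO_UNORDERED);
//
// The decorators select the barrier strategy and memory ordering; the alias
// category is derived from adr_type.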

//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
  uint shift = arytype->is_flat() ? arytype->flat_log_elem_size() : exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

  // short-circuit a common case (saves lots of confusing waste motion)
  jint idx_con = find_int_con(idx, -1);
  if (idx_con >= 0) {
    intptr_t offset = header + ((intptr_t)idx_con << shift);
    return basic_plus_adr(ary, offset);
  }

  // must be correct type for alignment purposes
  Node* base = basic_plus_adr(ary, header);
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
  Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
}

//-------------------------load_array_element-------------------------
Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
  const Type* elemtype = arytype->elem();
  BasicType elembt = elemtype->array_element_basic_type();
  Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  if (elembt == T_NARROWOOP) {
    elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
  }
  Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
                            IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
  return ld;
}

//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
  PreserveReexecuteState preexecs(this);
  if (EnableValhalla) {
    // Make sure the call is "re-executed" if buffering of inline type arguments triggers deoptimization.
    // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
    jvms()->set_should_reexecute(true);
    int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
    inc_sp(arg_size);
  }
  // Add the call arguments
  const TypeTuple* domain = call->tf()->domain_sig();
  uint nargs = domain->cnt();
  int arg_num = 0;
  for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
    Node* arg = argument(i-TypeFunc::Parms);
    const Type* t = domain->field_at(i);
    // TODO 8284443 A static call to a mismatched method should still be scalarized
    if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
      // We don't pass inline type arguments by reference but instead pass each field of the inline type
      if (!arg->is_InlineType()) {
        assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
        arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
      }
      InlineTypeNode* vt = arg->as_InlineType();
      vt->pass_fields(this, call, idx, true, !t->maybe_null());
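      // (Explanatory sketch: a scalarized inline type argument is passed as
      // one call input per field -- plus an is-init input when the argument
      // is nullable -- rather than as a single oop; pass_fields appends
      // those inputs starting at idx and advances idx past them.)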
      // If an inline type argument is passed as fields, attach the Method* to the call site
      // to be able to access the extended signature later via attached_method_before_pc().
      // For example, see CompiledMethod::preserve_callee_argument_oops().
      call->set_override_symbolic_info(true);
      // Register an evol dependency on the callee method to make sure that this method is deoptimized and
      // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
      C->dependencies()->assert_evol_method(call->method());
      arg_num++;
      continue;
    } else if (arg->is_InlineType()) {
      // Pass inline type argument via oop to callee
      arg = arg->as_InlineType()->buffer(this);
    }
    if (t != Type::HALF) {
      arg_num++;
    }
    call->init_req(idx++, arg);
  }
}

//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// (The return value, if any, is captured separately; see
// set_results_for_java_call below.)
void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {

  // Add the predefined inputs:
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O    , i_o() );
  call->init_req( TypeFunc::Memory , reset_memory() );
  call->init_req( TypeFunc::FramePtr, frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top() );

  add_safepoint_edges(call, must_throw);

  Node* xcall = _gvn.transform(call);

  if (xcall == top()) {
    set_control(top());
    return;
  }
  assert(xcall == call, "call identity is stable");

  // Re-use the current map to produce the result.

  set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
  set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O, separate_io_proj)));
  set_all_memory_call(xcall, separate_io_proj);

  //return xcall;   // no need, caller already has it
}

Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
  if (stopped())  return top();  // maybe the call folded up?

  // Note:  Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
    set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
    set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
  }

  // Capture the return value, if any.
  Node* ret;
  if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
    ret = top();
  } else if (call->tf()->returns_inline_type_as_fields()) {
    // Return of multiple values (inline type fields): we create an
    // InlineTypeNode; each field is a projection from the call.
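    // (Explanatory note: with a scalarized return convention the call emits
    // one result projection per returned field; make_from_multi collects
    // those projections, starting at base_input, back into a single
    // InlineTypeNode.)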
    ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
    uint base_input = TypeFunc::Parms;
    ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
  } else {
    ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
    ciType* t = call->method()->return_type();
    if (t->is_klass()) {
      const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
      if (type->is_inlinetypeptr()) {
        ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass(), type->inline_klass()->is_null_free());
      }
    }
  }

  // We just called the constructor on a value type receiver. Reload it from the buffer.
  if (call->method()->is_object_constructor() && call->method()->holder()->is_inlinetype()) {
    InlineTypeNode* receiver = call->in(TypeFunc::Parms)->as_InlineType();
    assert(receiver->is_larval(), "must be larval");
    assert(receiver->is_allocated(&gvn()), "larval must be buffered");
    InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, receiver->get_oop(), receiver->bottom_type()->inline_klass(), true);
    assert(!reloaded->is_larval(), "should not be larval anymore");
    replace_in_map(receiver, reloaded);
  }

  return ret;
}

//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
// The real problem is that I am not doing real Type analysis on memory,
// so I cannot distinguish card mark stores from other stores.  Across a GC
// point the Store Barrier and the card mark memory have to agree.  I cannot
// have a card mark store and its barrier split across the GC point from
// either above or below.  Here I get that to happen by reading ALL of memory.
// A better answer would be to separate out card marks from other memory.
// For now, return the input memory state, so that it can be reused
// after the call, if this call has restricted memory effects.
Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
  // Set fixed predefined input arguments
  Node* memory = reset_memory();
  Node* m = narrow_mem == nullptr ? memory : narrow_mem;
  call->init_req( TypeFunc::Control,   control()  );
  call->init_req( TypeFunc::I_O,       top()      );  // does no i/o
  call->init_req( TypeFunc::Memory,    m          );  // may gc ptrs
  call->init_req( TypeFunc::FramePtr,  frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top()      );
  return memory;
}

//-------------------set_predefined_output_for_runtime_call--------------------
// Set control and memory (not i_o) from the call.
// If keep_mem is not null, use it for the output state,
// except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.
// If hook_mem is null, this call produces no memory effects at all.
// If hook_mem is a Java-visible memory slice (such as arraycopy operands),
// then only that memory slice is taken from the call.
// In the last case, we must put an appropriate memory barrier before
// the call, so as to create the correct anti-dependencies on loads
// preceding the call.
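// Summary of the modes (explanatory, derived from the code below):
//   keep_mem == nullptr                      -> all memory comes from the call
//   keep_mem != nullptr, hook_mem == nullptr -> the call has no memory effects
//   keep_mem != nullptr, hook_mem != nullptr -> only the hook_mem slice (e.g.
//                                               raw memory) comes from the call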
void GraphKit::set_predefined_output_for_runtime_call(Node* call,
                                                      Node* keep_mem,
                                                      const TypePtr* hook_mem) {
  // no i/o
  set_control(_gvn.transform( new ProjNode(call, TypeFunc::Control) ));
  if (keep_mem) {
    // First clone the existing memory state
    set_all_memory(keep_mem);
    if (hook_mem != nullptr) {
      // Make memory for the call
      Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) );
      // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
      // We also use hook_mem to extract specific effects from arraycopy stubs.
      set_memory(mem, hook_mem);
    }
    // ...else the call has NO memory effects.

    // Make sure the call advertises its memory effects precisely.
    // This lets us build accurate anti-dependences in gcm.cpp.
    assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
           "call node must be constructed correctly");
  } else {
    assert(hook_mem == nullptr, "");
    // This is not a "slow path" call; all memory comes from the call.
    set_all_memory_call(call);
  }
}

// Keep track of MergeMems feeding into other MergeMems
static void add_mergemem_users_to_worklist(Unique_Node_List& wl, Node* mem) {
  if (!mem->is_MergeMem()) {
    return;
  }
  for (SimpleDUIterator i(mem); i.has_next(); i.next()) {
    Node* use = i.get();
    if (use->is_MergeMem()) {
      wl.push(use);
    }
  }
}

// Replace the call with the current state of the kit.
void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
  JVMState* ejvms = nullptr;
  if (has_exceptions()) {
    ejvms = transfer_exceptions_into_jvms();
  }

  ReplacedNodes replaced_nodes = map()->replaced_nodes();
  ReplacedNodes replaced_nodes_exception;
  Node* ex_ctl = top();

  SafePointNode* final_state = stop();

  // Find all the needed outputs of this call
  CallProjections* callprojs = call->extract_projections(true);

  Unique_Node_List wl;
  Node* init_mem = call->in(TypeFunc::Memory);
  Node* final_mem = final_state->in(TypeFunc::Memory);
  Node* final_ctl = final_state->in(TypeFunc::Control);
  Node* final_io = final_state->in(TypeFunc::I_O);

  // Replace all the old call edges with the edges from the inlining result
  if (callprojs->fallthrough_catchproj != nullptr) {
    C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
  }
  if (callprojs->fallthrough_memproj != nullptr) {
    if (final_mem->is_MergeMem()) {
      // The parser's exit MergeMem was not transformed but may be optimized
      final_mem = _gvn.transform(final_mem);
    }
    C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
    add_mergemem_users_to_worklist(wl, final_mem);
  }
  if (callprojs->fallthrough_ioproj != nullptr) {
    C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
  }

  // Replace the result with the new result if it exists and is used
  if (callprojs->resproj[0] != nullptr && result != nullptr) {
    // If the inlined code is dead, the result projections for an inline type returned as
    // fields have not been replaced. They will go away once the call is replaced by TOP below.
    assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
           "unexpected number of results");
    C->gvn_replace_by(callprojs->resproj[0], result);
  }

  if (ejvms == nullptr) {
    // No exception state; simply kill off the exceptional paths.
    if (callprojs->catchall_catchproj != nullptr) {
      C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
    }
    if (callprojs->catchall_memproj != nullptr) {
      C->gvn_replace_by(callprojs->catchall_memproj, C->top());
    }
    if (callprojs->catchall_ioproj != nullptr) {
      C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
    }
    // Replace the old exception object with top
    if (callprojs->exobj != nullptr) {
      C->gvn_replace_by(callprojs->exobj, C->top());
    }
  } else {
    GraphKit ekit(ejvms);

    // Load my combined exception state into the kit, with all phis transformed:
    SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
    replaced_nodes_exception = ex_map->replaced_nodes();

    Node* ex_oop = ekit.use_exception_state(ex_map);

    if (callprojs->catchall_catchproj != nullptr) {
      C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
      ex_ctl = ekit.control();
    }
    if (callprojs->catchall_memproj != nullptr) {
      Node* ex_mem = ekit.reset_memory();
      C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
      add_mergemem_users_to_worklist(wl, ex_mem);
    }
    if (callprojs->catchall_ioproj != nullptr) {
      C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
    }

    // Replace the old exception object with the newly created one
    if (callprojs->exobj != nullptr) {
      C->gvn_replace_by(callprojs->exobj, ex_oop);
    }
  }

  // Disconnect the call from the graph
  call->disconnect_inputs(C);
  C->gvn_replace_by(call, C->top());

  // Clean up any MergeMems that feed other MergeMems since the
  // optimizer doesn't like that.
  while (wl.size() > 0) {
    _gvn.transform(wl.pop());
  }

  if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes.apply(C, final_ctl);
  }
  if (!ex_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes_exception.apply(C, ex_ctl);
  }
}


//------------------------------increment_counter------------------------------
// for statistics: increment a VM counter by 1

void GraphKit::increment_counter(address counter_addr) {
  Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
  increment_counter(adr1);
}

void GraphKit::increment_counter(Node* counter_addr) {
  int adr_type = Compile::AliasIdxRaw;
  Node* ctrl = control();
  Node* cnt  = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
  store_to_memory(ctrl, counter_addr, incr, T_LONG, adr_type, MemNode::unordered);
}


//------------------------------uncommon_trap----------------------------------
// Bail out to the interpreter in mid-method.  Implemented by calling the
// uncommon_trap blob.  This helper function inserts a runtime call with the
// right debug info.
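// (Explanatory note: trap_request is an encoded value that packs a
// DeoptReason, a DeoptAction and an optional index, built by
// Deoptimization::make_trap_request and decoded below via
// trap_request_reason/trap_request_action/trap_request_index.)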
Node* GraphKit::uncommon_trap(int trap_request,
                              ciKlass* klass, const char* comment,
                              bool must_throw,
                              bool keep_exact_action) {
  if (failing())  stop();
  if (stopped())  return nullptr; // trap reachable?

  // Note:  If ProfileTraps is true, and if a deopt. actually
  // occurs here, the runtime will make sure an MDO exists.  There is
  // no need to call method()->ensure_method_data() at this point.

  // Set the stack pointer to the right value for reexecution:
  set_sp(reexecute_sp());

#ifdef ASSERT
  if (!must_throw) {
    // Make sure the stack has at least enough depth to execute
    // the current bytecode.
    int inputs, ignored_depth;
    if (compute_stack_effects(inputs, ignored_depth)) {
      assert(sp() >= inputs, "must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
             Bytecodes::name(java_bc()), sp(), inputs);
    }
  }
#endif

  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);

  switch (action) {
  case Deoptimization::Action_maybe_recompile:
  case Deoptimization::Action_reinterpret:
    // Temporary fix for 6529811 to allow virtual calls to be sure they
    // get the chance to go from mono->bi->mega
    if (!keep_exact_action &&
        Deoptimization::trap_request_index(trap_request) < 0 &&
        too_many_recompiles(reason)) {
      // This BCI is causing too many recompilations.
      if (C->log() != nullptr) {
        C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
                       Deoptimization::trap_reason_name(reason),
                       Deoptimization::trap_action_name(action));
      }
      action = Deoptimization::Action_none;
      trap_request = Deoptimization::make_trap_request(reason, action);
    } else {
      C->set_trap_can_recompile(true);
    }
    break;
  case Deoptimization::Action_make_not_entrant:
    C->set_trap_can_recompile(true);
    break;
  case Deoptimization::Action_none:
  case Deoptimization::Action_make_not_compilable:
    break;
  default:
#ifdef ASSERT
    fatal("unknown action %d: %s", action, Deoptimization::trap_action_name(action));
#endif
    break;
  }

  if (TraceOptoParse) {
    char buf[100];
    tty->print_cr("Uncommon trap %s at bci:%d",
                  Deoptimization::format_trap_request(buf, sizeof(buf),
                                                      trap_request), bci());
  }

  CompileLog* log = C->log();
  if (log != nullptr) {
    int kid = (klass == nullptr)? -1: log->identify(klass);
    log->begin_elem("uncommon_trap bci='%d'", bci());
    char buf[100];
    log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
                                                          trap_request));
    if (kid >= 0)            log->print(" klass='%d'", kid);
    if (comment != nullptr)  log->print(" comment='%s'", comment);
    log->end_elem();
  }

  // Make sure any guarding test views this path as very unlikely
  Node* i0 = control()->in(0);
  if (i0 != nullptr && i0->is_If()) {  // Found a guarding if test?
    IfNode* iff = i0->as_If();
    float f = iff->_prob;              // Get prob
    if (control()->Opcode() == Op_IfTrue) {
      if (f > PROB_UNLIKELY_MAG(4))
        iff->_prob = PROB_MIN;
    } else {
      if (f < PROB_LIKELY_MAG(4))
        iff->_prob = PROB_MAX;
    }
  }
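  // (Explanatory note: nudging the guarding if's probability marks the trap
  // path as extremely unlikely, so later frequency-based block placement
  // moves it out of the hot code path.)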

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Now insert the uncommon trap subroutine call
  address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
  const TypePtr* no_memory_effects = nullptr;
  // Pass the index of the class to be loaded
  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
                                 (must_throw ? RC_MUST_THROW : 0),
                                 OptoRuntime::uncommon_trap_Type(),
                                 call_addr, "uncommon_trap", no_memory_effects,
                                 intcon(trap_request));
  assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
         "must extract request correctly from the graph");
  assert(trap_request != 0, "zero value reserved by uncommon_trap_request");

  call->set_req(TypeFunc::ReturnAdr, returnadr());
  // The debug info is the only real input to this call.

  // Halt-and-catch fire here.  The above call should never return!
  HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen"
                                                       PRODUCT_ONLY(COMMA /*reachable*/false));
  _gvn.set_type_bottom(halt);
  root()->add_req(halt);

  stop_and_kill_map();
  return call;
}


//--------------------------just_allocated_object------------------------------
// Report the object that was just allocated.
// It must be the case that there are no intervening safepoints.
// We use this to determine if an object is so "fresh" that
// it does not require card marks.
Node* GraphKit::just_allocated_object(Node* current_control) {
  Node* ctrl = current_control;
  // Object::<init> is invoked after allocation; most invoke nodes will be
  // reduced, but a Region node may remain at parse time.  Check for that
  // pattern and skip the Region node if it has degraded to a copy.
  if (ctrl != nullptr && ctrl->is_Region() && ctrl->req() == 2 &&
      ctrl->as_Region()->is_copy()) {
    ctrl = ctrl->as_Region()->is_copy();
  }
  if (C->recent_alloc_ctl() == ctrl) {
    return C->recent_alloc_obj();
  }
  return nullptr;
}


/**
 * Record profiling data exact_kls for Node n with the type system so
 * that it can propagate it (speculation)
 *
 * @param n          node that the type applies to
 * @param exact_kls  type from profiling
 * @param ptr_kind   did profiling see null?
 *
 * @return           node with improved type
 */
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
  const Type* current_type = _gvn.type(n);
  assert(UseTypeSpeculation, "type speculation must be on");

  const TypePtr* speculative = current_type->speculative();

  // Should the klass from the profile be recorded in the speculative type?
  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
    const TypeOopPtr* xtype = tklass->as_instance_type();
    assert(xtype->klass_is_exact(), "Should be exact");
    // Any reason to believe n is not null (from this profiling or a previous one)?
    assert(ptr_kind != ProfileAlwaysNull, "impossible here");
    const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
    // record the new speculative type's depth
    speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
    speculative = speculative->with_inline_depth(jvms()->depth());
  } else if (current_type->would_improve_ptr(ptr_kind)) {
    // Profiling reports that null was never seen, so we can change the
    // speculative type to non-null ptr.
    if (ptr_kind == ProfileAlwaysNull) {
      speculative = TypePtr::NULL_PTR;
    } else {
      assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
      const TypePtr* ptr = TypePtr::NOTNULL;
      if (speculative != nullptr) {
        speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
      } else {
        speculative = ptr;
      }
    }
  }

  if (speculative != current_type->speculative()) {
    // Build a type with a speculative type (what we think we know
    // about the type but will need a guard when we use it)
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
    Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }

  return n;
}

/**
 * Record profiling data from receiver profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param n  receiver node
 *
 * @return   node with improved type
 */
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
  if (!UseTypeSpeculation) {
    return n;
  }
  ciKlass* exact_kls = profile_has_unique_klass();
  ProfilePtrKind ptr_kind = ProfileMaybeNull;
  if ((java_bc() == Bytecodes::_checkcast ||
       java_bc() == Bytecodes::_instanceof ||
       java_bc() == Bytecodes::_aastore) &&
      method()->method_data()->is_mature()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data != nullptr) {
      if (java_bc() == Bytecodes::_aastore) {
        ciKlass* array_type = nullptr;
        ciKlass* element_type = nullptr;
        ProfilePtrKind element_ptr = ProfileMaybeNull;
        bool flat_array = true;
        bool null_free_array = true;
        method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
        exact_kls = element_type;
        ptr_kind = element_ptr;
      } else {
        if (!data->as_BitData()->null_seen()) {
          ptr_kind = ProfileNeverNull;
        } else {
          assert(data->is_ReceiverTypeData(), "bad profile data type");
          ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
          uint i = 0;
          for (; i < call->row_limit(); i++) {
            ciKlass* receiver = call->receiver(i);
            if (receiver != nullptr) {
              break;
            }
          }
          ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
        }
      }
    }
  }
  return record_profile_for_speculation(n, exact_kls, ptr_kind);
}

/**
 * Record profiling data from argument profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param dest_method  target method for the call
 * @param bc           what invoke bytecode is this?
 */
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
  if (!UseTypeSpeculation) {
    return;
  }
  const TypeFunc* tf = TypeFunc::make(dest_method);
  int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
  int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
    const Type* targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
    if (is_reference_type(targ->basic_type())) {
      ProfilePtrKind ptr_kind = ProfileMaybeNull;
      ciKlass* better_type = nullptr;
      if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
        record_profile_for_speculation(argument(j), better_type, ptr_kind);
      }
      i++;
    }
  }
}

/**
 * Record profiling data from parameter profiling at an invoke with
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_parameters_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  for (int i = 0, j = 0; i < method()->arg_size(); i++) {
    if (_gvn.type(local(i))->isa_oopptr()) {
      ProfilePtrKind ptr_kind = ProfileMaybeNull;
      ciKlass* better_type = nullptr;
      if (method()->parameter_profiled_type(j, better_type, ptr_kind)) {
        record_profile_for_speculation(local(i), better_type, ptr_kind);
      }
      j++;
    }
  }
}

/**
 * Record profiling data from return value profiling at an invoke with
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_return_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  ProfilePtrKind ptr_kind = ProfileMaybeNull;
  ciKlass* better_type = nullptr;
  if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
    // If profiling reports a single type for the return value,
    // feed it to the type system so it can propagate it as a
    // speculative type
    record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
  }
}
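// (Summary, explanatory: the four record_profiled_*_for_speculation helpers
// above feed speculation from the four available profiling sites -- call
// receivers, call arguments, incoming parameters, and return values -- all
// funneling into record_profile_for_speculation.)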

void GraphKit::round_double_arguments(ciMethod* dest_method) {
  if (Matcher::strict_fp_requires_explicit_rounding) {
    // (Note: TypeFunc::make has a cache that makes this fast.)
    const TypeFunc* tf = TypeFunc::make(dest_method);
    int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
    for (int j = 0; j < nargs; j++) {
      const Type* targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
      if (targ->basic_type() == T_DOUBLE) {
        // If any parameters are doubles, they must be rounded before
        // the call, dprecision_rounding does gvn.transform
        Node* arg = argument(j);
        arg = dprecision_rounding(arg);
        set_argument(j, arg);
      }
    }
  }
}

// rounding for strict float precision conformance
Node* GraphKit::precision_rounding(Node* n) {
  if (Matcher::strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE == 0) {
      return _gvn.transform(new RoundFloatNode(0, n));
    }
#else
    Unimplemented();
#endif // IA32
  }
  return n;
}

// rounding for strict double precision conformance
Node* GraphKit::dprecision_rounding(Node* n) {
  if (Matcher::strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE < 2) {
      return _gvn.transform(new RoundDoubleNode(0, n));
    }
#else
    Unimplemented();
#endif // IA32
  }
  return n;
}

//=============================================================================
// Generate a fast path/slow path idiom.  Graph looks like:
// [foo] indicates that 'foo' is a parameter
//
//              [in]            null
//                 \            /
//                  CmpP
//                  Bool ne
//                   If
//                  /  \
//              True    False-<2>
//               /         |
//              /          cast_not_null
//           Load          |    |   ^
//       [fast_test]       |    |
// gvn to   opt_test       |    |
//           /    \        |    <1>
//       True     False    |
//         |       \\      |
//    [slow_call]   \[fast_result]
//     Ctl   Val     \       \
//      |             \       \
//     Catch      <1>  \       \
//    /    \       ^    \       \
//   Ex    No_Ex   |     \       \
//    |       \    \     | \ <2>  \
//   ...       \ [slow_res]  |  |  \ [null_result]
//               \      \--+--+---  |   |
//                \         |      / \  |  /
//                 --------Region      Phi
//
//=============================================================================
// Code is structured as a series of driver functions all called 'do_XXX' that
// call a set of helper functions.  Helper functions first, then drivers.

//------------------------------null_check_oop---------------------------------
// Null check oop.  Set null-path control into Region in slot 3.
// Make a cast-not-null use the not-null control.  Return cast.
Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
                               bool never_see_null,
                               bool safe_for_replace,
                               bool speculative) {
  // Initial null check taken path
  (*null_control) = top();
  Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);

  // Generate uncommon_trap:
  if (never_see_null && (*null_control) != top()) {
    // If we see an unexpected null at a check-cast we record it and force a
    // recompile; the offending check-cast will be compiled to handle nulls.
    // If we see more than one offending BCI, then all checkcasts in the
    // method will be compiled to handle nulls.
    PreserveJVMState pjvms(this);
    set_control(*null_control);
    replace_in_map(value, null());
    Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
    uncommon_trap(reason,
                  Deoptimization::Action_make_not_entrant);
    (*null_control) = top();    // null path is dead
  }
  if ((*null_control) == top() && safe_for_replace) {
    replace_in_map(value, cast);
  }

  // Cast away null-ness on the result
  return cast;
}

//------------------------------opt_iff----------------------------------------
// Optimize the fast-check IfNode.  Set the fast-path region slot 2.
// Return slow-path control.
Node* GraphKit::opt_iff(Node* region, Node* iff) {
  IfNode* opt_iff = _gvn.transform(iff)->as_If();

  // Fast path taken; set region slot 2
  Node* fast_taken = _gvn.transform( new IfFalseNode(opt_iff) );
  region->init_req(2, fast_taken); // Capture fast-control

  // Fast path not-taken, i.e. slow path
  Node* slow_taken = _gvn.transform( new IfTrueNode(opt_iff) );
  return slow_taken;
}

//-----------------------------make_runtime_call-------------------------------
Node* GraphKit::make_runtime_call(int flags,
                                  const TypeFunc* call_type, address call_addr,
                                  const char* call_name,
                                  const TypePtr* adr_type,
                                  // The following parms are all optional.
                                  // The first null ends the list.
                                  Node* parm0, Node* parm1,
                                  Node* parm2, Node* parm3,
                                  Node* parm4, Node* parm5,
                                  Node* parm6, Node* parm7) {
  assert(call_addr != nullptr, "must not call null targets");

  // Slow-path call
  bool is_leaf = !(flags & RC_NO_LEAF);
  bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
  if (call_name == nullptr) {
    assert(!is_leaf, "must supply name for leaf");
    call_name = OptoRuntime::stub_name(call_addr);
  }
  CallNode* call;
  if (!is_leaf) {
    call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
  } else if (flags & RC_NO_FP) {
    call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  } else if (flags & RC_VECTOR){
    uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
    call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
  } else {
    call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
  }

  // The following is similar to set_edges_for_java_call,
  // except that the memory effects of the call are restricted to AliasIdxRaw.

  // Slow path call has no side-effects, uses few values
  bool wide_in  = !(flags & RC_NARROW_MEM);
  bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);

  Node* prev_mem = nullptr;
  if (wide_in) {
    prev_mem = set_predefined_input_for_runtime_call(call);
  } else {
    assert(!wide_out, "narrow in => narrow out");
    Node* narrow_mem = memory(adr_type);
    prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
  }

  // Hook each parm in order.  Stop looking at the first null.
  if (parm0 != nullptr) { call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != nullptr) { call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != nullptr) { call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != nullptr) { call->init_req(TypeFunc::Parms+3, parm3);
  if (parm4 != nullptr) { call->init_req(TypeFunc::Parms+4, parm4);
  if (parm5 != nullptr) { call->init_req(TypeFunc::Parms+5, parm5);
  if (parm6 != nullptr) { call->init_req(TypeFunc::Parms+6, parm6);
  if (parm7 != nullptr) { call->init_req(TypeFunc::Parms+7, parm7);
  /* close each nested if ===> */ } } } } } } } }
  assert(call->in(call->req()-1) != nullptr, "must initialize all parms");

  if (!is_leaf) {
    // Non-leaves can block and take safepoints:
    add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));
  }
  // Non-leaves can throw exceptions:
  if (has_io) {
    call->set_req(TypeFunc::I_O, i_o());
  }

  if (flags & RC_UNCOMMON) {
    // Set the count to a tiny probability.  Cf. Estimate_Block_Frequency.
    // (An "if" probability corresponds roughly to an unconditional count.
    // Sort of.)
    call->set_cnt(PROB_UNLIKELY_MAG(4));
  }

  Node* c = _gvn.transform(call);
  assert(c == call, "cannot disappear");

  if (wide_out) {
    // Slow path call has full side-effects.
    set_predefined_output_for_runtime_call(call);
  } else {
    // Slow path call has few side-effects, and/or sets few values.
    set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
  }

  if (has_io) {
    set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
  }
  return call;
}

// i2b
Node* GraphKit::sign_extend_byte(Node* in) {
  Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
  return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
}

// i2s
Node* GraphKit::sign_extend_short(Node* in) {
  Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
  return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
}


//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
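// (Explanatory note: for each alias slice where the incoming path's memory
// differs from the current state, a memory Phi on 'region' is created, or an
// existing Phi on 'region' is extended, so that slice merges per path; slices
// that already agree are left untouched.)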
void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
  for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
    Node* old_slice = mms.force_memory();
    Node* new_slice = mms.memory2();
    if (old_slice != new_slice) {
      PhiNode* phi;
      if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
        if (mms.is_empty()) {
          // clone base memory Phi's inputs for this memory slice
          assert(old_slice == mms.base_memory(), "sanity");
          phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
          _gvn.set_type(phi, Type::MEMORY);
          for (uint i = 1; i < phi->req(); i++) {
            phi->init_req(i, old_slice->in(i));
          }
        } else {
          phi = old_slice->as_Phi(); // Phi was generated already
        }
      } else {
        phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
        _gvn.set_type(phi, Type::MEMORY);
      }
      phi->set_req(new_path, new_slice);
      mms.set_memory(phi);
    }
  }
}

//------------------------------make_slow_call_ex------------------------------
// Make the exception handler hookups for the slow call
void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) {
  if (stopped())  return;

  // Make a catch node with just two handlers:  fall-through and catch-all
  Node* i_o  = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) );
  Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) );
  Node* norm = new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci);
  _gvn.set_type_bottom(norm);
  C->record_for_igvn(norm);
  Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) );

  { PreserveJVMState pjvms(this);
    set_control(excp);
    set_i_o(i_o);

    if (excp != top()) {
      if (deoptimize) {
        // Deoptimize if an exception is caught. Don't construct exception state in this case.
        uncommon_trap(Deoptimization::Reason_unhandled,
                      Deoptimization::Action_none);
      } else {
        // Create an exception state also.
        // Use an exact type if the caller has a specific exception.
        const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
        Node* ex_oop = new CreateExNode(ex_type, control(), i_o);
        add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
      }
    }
  }

  // Get the no-exception control from the CatchNode.
  set_control(norm);
}

static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN& gvn, BasicType bt) {
  Node* cmp = nullptr;
  switch(bt) {
    case T_INT:     cmp = new CmpINode(in1, in2); break;
    case T_ADDRESS: cmp = new CmpPNode(in1, in2); break;
    default: fatal("unexpected comparison type %s", type2name(bt));
  }
  cmp = gvn.transform(cmp);
  Node* bol = gvn.transform(new BoolNode(cmp, test));
  IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);
  gvn.transform(iff);
  if (!bol->is_Con()) gvn.record_for_igvn(iff);
  return iff;
}

//-------------------------------gen_subtype_check-----------------------------
// Generate a subtyping check.  Takes as input the subtype and supertype.
// Returns 2 values: sets the default control() to the true path and returns
// the false path.  Only reads invariant memory; sets no (visible) memory.
// The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
// but that's not exposed to the optimizer.  This call also doesn't take in an
// Object; if you wish to check an Object you need to load the Object's class
// prior to coming here.
Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, Node* mem, PhaseGVN& gvn,
                               ciMethod* method, int bci) {
  Compile* C = gvn.C;
  if ((*ctrl)->is_top()) {
    return C->top();
  }

  // Fast check for identical types, perhaps identical constants.
  // The types can even be identical non-constants, in cases
  // involving Array.newInstance, Object.clone, etc.
  if (subklass == superklass)
    return C->top();             // false path is dead; no test needed.

  if (gvn.type(superklass)->singleton()) {
    const TypeKlassPtr* superk = gvn.type(superklass)->is_klassptr();
    const TypeKlassPtr* subk   = gvn.type(subklass)->is_klassptr();

    // In the common case of an exact superklass, try to fold up the
    // test before generating code.  You may ask, why not just generate
    // the code and then let it fold up?  The answer is that the generated
    // code will necessarily include null checks, which do not always
    // completely fold away.  If they are also needless, then they turn
    // into a performance loss.  Example:
    //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
    // Here, the type of 'fa' is often exact, so the store check
    // of fa[1]=x will fold up, without testing the nullness of x.
    switch (C->static_subtype_check(superk, subk)) {
    case Compile::SSC_always_false:
      {
        Node* always_fail = *ctrl;
        *ctrl = gvn.C->top();
        return always_fail;
      }
    case Compile::SSC_always_true:
      return C->top();
    case Compile::SSC_easy_test:
      {
        // Just do a direct pointer compare and be done.
        IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
        *ctrl = gvn.transform(new IfTrueNode(iff));
        return gvn.transform(new IfFalseNode(iff));
      }
    case Compile::SSC_full_test:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // %%% Possible further optimization:  Even if the superklass is not exact,
  // if the subklass is the unique subtype of the superklass, the check
  // will always succeed.  We could leave a dependency behind to ensure this.

  // First load the super-klass's check-offset
  Node* p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
  Node* m = C->immutable_memory();
  Node* chk_off = gvn.transform(new LoadINode(nullptr, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
  int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
  const TypeInt* chk_off_t = chk_off->Value(&gvn)->isa_int();
  int chk_off_con = (chk_off_t != nullptr && chk_off_t->is_con()) ? chk_off_t->get_con() : cacheoff_con;
  bool might_be_cache = (chk_off_con == cacheoff_con);
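  // (Explanatory note: Klass::_super_check_offset either points into this
  // klass's display of primary supers -- the constant-depth case, decided by
  // a single load and compare -- or it equals secondary_super_cache_offset,
  // in which case a scan of the secondary supers backed by the 1-word cache
  // may be required; might_be_cache records that latter possibility.)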
chk_off_t->get_con() : cacheoff_con; 2892 bool might_be_cache = (chk_off_con == cacheoff_con); 2893 2894 // Load from the sub-klass's super-class display list, or a 1-word cache of 2895 // the secondary superclass list, or a failing value with a sentinel offset 2896 // if the super-klass is an interface or exceptionally deep in the Java 2897 // hierarchy and we have to scan the secondary superclass list the hard way. 2898 // Worst-case type is a little odd: null is allowed as a result (usually 2899 // klass loads can never produce a null). 2900 Node *chk_off_X = chk_off; 2901 #ifdef _LP64 2902 chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X)); 2903 #endif 2904 Node *p2 = gvn.transform(new AddPNode(subklass,subklass,chk_off_X)); 2905 // For some types like interfaces the following loadKlass is from a 1-word 2906 // cache which is mutable so can't use immutable memory. Other 2907 // types load from the super-class display table which is immutable. 2908 Node *kmem = C->immutable_memory(); 2909 // secondary_super_cache is not immutable but can be treated as such because: 2910 // - no ideal node writes to it in a way that could cause an 2911 // incorrect/missed optimization of the following Load. 2912 // - it's a cache so, worst case, not reading the latest value 2913 // wouldn't cause incorrect execution. 2914 if (might_be_cache && mem != nullptr) { 2915 kmem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(C->get_alias_index(gvn.type(p2)->is_ptr())) : mem; 2916 } 2917 Node *nkls = gvn.transform(LoadKlassNode::make(gvn, nullptr, kmem, p2, gvn.type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL)); 2918 2919 // Compile speed common case: ARE a subtype and we canNOT fail 2920 if (superklass == nkls) { 2921 return C->top(); // false path is dead; no test needed. 2922 } 2923 2924 // Gather the various successes & failures here 2925 RegionNode* r_not_subtype = new RegionNode(3); 2926 gvn.record_for_igvn(r_not_subtype); 2927 RegionNode* r_ok_subtype = new RegionNode(4); 2928 gvn.record_for_igvn(r_ok_subtype); 2929 2930 // If we might perform an expensive check, first try to take advantage of profile data that was attached to the 2931 // SubTypeCheck node 2932 if (might_be_cache && method != nullptr && VM_Version::profile_all_receivers_at_type_check()) { 2933 ciCallProfile profile = method->call_profile_at_bci(bci); 2934 float total_prob = 0; 2935 for (int i = 0; profile.has_receiver(i); ++i) { 2936 float prob = profile.receiver_prob(i); 2937 total_prob += prob; 2938 } 2939 if (total_prob * 100.
>= TypeProfileSubTypeCheckCommonThreshold) { 2940 const TypeKlassPtr* superk = gvn.type(superklass)->is_klassptr(); 2941 for (int i = 0; profile.has_receiver(i); ++i) { 2942 ciKlass* klass = profile.receiver(i); 2943 const TypeKlassPtr* klass_t = TypeKlassPtr::make(klass); 2944 Compile::SubTypeCheckResult result = C->static_subtype_check(superk, klass_t); 2945 if (result != Compile::SSC_always_true && result != Compile::SSC_always_false) { 2946 continue; 2947 } 2948 float prob = profile.receiver_prob(i); 2949 ConNode* klass_node = gvn.makecon(klass_t); 2950 IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, klass_node, BoolTest::eq, prob, gvn, T_ADDRESS); 2951 Node* iftrue = gvn.transform(new IfTrueNode(iff)); 2952 2953 if (result == Compile::SSC_always_true) { 2954 r_ok_subtype->add_req(iftrue); 2955 } else { 2956 assert(result == Compile::SSC_always_false, ""); 2957 r_not_subtype->add_req(iftrue); 2958 } 2959 *ctrl = gvn.transform(new IfFalseNode(iff)); 2960 } 2961 } 2962 } 2963 2964 // See if we get an immediate positive hit. Happens roughly 83% of the 2965 // time. Test to see if the value loaded just previously from the subklass 2966 // is exactly the superklass. 2967 IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS); 2968 Node *iftrue1 = gvn.transform( new IfTrueNode (iff1)); 2969 *ctrl = gvn.transform(new IfFalseNode(iff1)); 2970 2971 // Compile speed common case: Check for being deterministic right now. If 2972 // chk_off is a constant and not equal to cacheoff then we are NOT a 2973 // subklass. In this case we need exactly the 1 test above and we can 2974 // return those results immediately. 2975 if (!might_be_cache) { 2976 Node* not_subtype_ctrl = *ctrl; 2977 *ctrl = iftrue1; // We need exactly the 1 test above 2978 PhaseIterGVN* igvn = gvn.is_IterGVN(); 2979 if (igvn != nullptr) { 2980 igvn->remove_globally_dead_node(r_ok_subtype); 2981 igvn->remove_globally_dead_node(r_not_subtype); 2982 } 2983 return not_subtype_ctrl; 2984 } 2985 2986 r_ok_subtype->init_req(1, iftrue1); 2987 2988 // Check for immediate negative hit. Happens roughly 11% of the time (which 2989 // is roughly 63% of the remaining cases). Test to see if the loaded 2990 // check-offset points into the subklass display list or the 1-element 2991 // cache. If it points to the display (and NOT the cache) and the display 2992 // missed then it's not a subtype. 2993 Node *cacheoff = gvn.intcon(cacheoff_con); 2994 IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT); 2995 r_not_subtype->init_req(1, gvn.transform(new IfTrueNode (iff2))); 2996 *ctrl = gvn.transform(new IfFalseNode(iff2)); 2997 2998 // Check for self. Very rare to get here, but it is taken 1/3 the time. 2999 // No performance impact (too rare) but allows sharing of secondary arrays 3000 // which has some footprint reduction. 3001 IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS); 3002 r_ok_subtype->init_req(2, gvn.transform(new IfTrueNode(iff3))); 3003 *ctrl = gvn.transform(new IfFalseNode(iff3)); 3004 3005 // -- Roads not taken here: -- 3006 // We could also have chosen to perform the self-check at the beginning 3007 // of this code sequence, as the assembler does. This would not pay off 3008 // the same way, since the optimizer, unlike the assembler, can perform 3009 // static type analysis to fold away many successful self-checks. 
3010 // Non-foldable self checks work better here in second position, because 3011 // the initial primary superclass check subsumes a self-check for most 3012 // types. An exception would be a secondary type like array-of-interface, 3013 // which does not appear in its own primary supertype display. 3014 // Finally, we could have chosen to move the self-check into the 3015 // PartialSubtypeCheckNode, and from there out-of-line in a platform 3016 // dependent manner. But it is worthwhile to have the check here, 3017 // where it can perhaps be optimized. The cost in code space is 3018 // small (register compare, branch). 3019 3020 // Now do a linear scan of the secondary super-klass array. Again, no real 3021 // performance impact (too rare) but it's gotta be done. 3022 // Since the code is rarely used, there is no penalty for moving it 3023 // out of line, and it can only improve I-cache density. 3024 // The decision to inline or out-of-line this final check is platform 3025 // dependent, and is found in the AD file definition of PartialSubtypeCheck. 3026 Node* psc = gvn.transform( 3027 new PartialSubtypeCheckNode(*ctrl, subklass, superklass)); 3028 3029 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS); 3030 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4))); 3031 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4))); 3032 3033 // Return false path; set default control to true path. 3034 *ctrl = gvn.transform(r_ok_subtype); 3035 return gvn.transform(r_not_subtype); 3036 } 3037 3038 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) { 3039 const Type* sub_t = _gvn.type(obj_or_subklass); 3040 if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) { 3041 sub_t = TypeKlassPtr::make(sub_t->inline_klass()); 3042 obj_or_subklass = makecon(sub_t); 3043 } 3044 bool expand_subtype_check = C->post_loop_opts_phase() || // macro node expansion is over 3045 ExpandSubTypeCheckAtParseTime; // forced expansion 3046 if (expand_subtype_check) { 3047 MergeMemNode* mem = merged_memory(); 3048 Node* ctrl = control(); 3049 Node* subklass = obj_or_subklass; 3050 if (!sub_t->isa_klassptr()) { 3051 subklass = load_object_klass(obj_or_subklass); 3052 } 3053 3054 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci()); 3055 set_control(ctrl); 3056 return n; 3057 } 3058 3059 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci())); 3060 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq)); 3061 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 3062 set_control(_gvn.transform(new IfTrueNode(iff))); 3063 return _gvn.transform(new IfFalseNode(iff)); 3064 } 3065 3066 // Profile-driven exact type check: 3067 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass, 3068 float prob, Node* *casted_receiver) { 3069 assert(!klass->is_interface(), "no exact type check on interfaces"); 3070 Node* fail = top(); 3071 const Type* rec_t = _gvn.type(receiver); 3072 if (rec_t->is_inlinetypeptr()) { 3073 if (klass->equals(rec_t->inline_klass())) { 3074 (*casted_receiver) = receiver; // Always passes 3075 } else { 3076 (*casted_receiver) = top(); // Always fails 3077 fail = control(); 3078 set_control(top()); 3079 } 3080 return fail; 3081 } 3082 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces); 3083 Node* recv_klass =
load_object_klass(receiver); 3084 fail = type_check(recv_klass, tklass, prob); 3085 3086 if (!stopped()) { 3087 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr(); 3088 const TypeOopPtr* recv_xtype = tklass->as_instance_type(); 3089 assert(recv_xtype->klass_is_exact(), ""); 3090 3091 if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts 3092 // Subsume downstream occurrences of receiver with a cast to 3093 // recv_xtype, since now we know what the type will be. 3094 Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype); 3095 Node* res = _gvn.transform(cast); 3096 if (recv_xtype->is_inlinetypeptr()) { 3097 assert(!gvn().type(res)->maybe_null(), "receiver should never be null"); 3098 res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass()); 3099 } 3100 (*casted_receiver) = res; 3101 assert(!(*casted_receiver)->is_top(), "that path should be unreachable"); 3102 // (User must make the replace_in_map call.) 3103 } 3104 } 3105 3106 return fail; 3107 } 3108 3109 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass, 3110 float prob) { 3111 Node* want_klass = makecon(tklass); 3112 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass)); 3113 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); 3114 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN); 3115 set_control(_gvn.transform(new IfTrueNode (iff))); 3116 Node* fail = _gvn.transform(new IfFalseNode(iff)); 3117 return fail; 3118 } 3119 3120 //------------------------------subtype_check_receiver------------------------- 3121 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass, 3122 Node** casted_receiver) { 3123 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve(); 3124 Node* want_klass = makecon(tklass); 3125 3126 Node* slow_ctl = gen_subtype_check(receiver, want_klass); 3127 3128 // Ignore interface type information until interface types are properly tracked. 3129 if (!stopped() && !klass->is_interface()) { 3130 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr(); 3131 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type(); 3132 if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts 3133 Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type)); 3134 if (recv_type->is_inlinetypeptr()) { 3135 cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass()); 3136 } 3137 (*casted_receiver) = cast; 3138 } 3139 } 3140 3141 return slow_ctl; 3142 } 3143 3144 //------------------------------seems_never_null------------------------------- 3145 // Use null_seen information if it is available from the profile. 3146 // If we see an unexpected null at a type check we record it and force a 3147 // recompile; the offending check will be recompiled to handle nulls. 3148 // If we see several offending BCIs, then all checks in the 3149 // method will be recompiled. 3150 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) { 3151 speculating = !_gvn.type(obj)->speculative_maybe_null(); 3152 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating); 3153 if (UncommonNullCast // Cutout for this technique 3154 && obj != null() // And not the -Xcomp stupid case? 
3155 && !too_many_traps(reason) 3156 ) { 3157 if (speculating) { 3158 return true; 3159 } 3160 if (data == nullptr) 3161 // Edge case: no mature data. Be optimistic here. 3162 return true; 3163 // If the profile has not seen a null, assume it won't happen. 3164 assert(java_bc() == Bytecodes::_checkcast || 3165 java_bc() == Bytecodes::_instanceof || 3166 java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here"); 3167 return !data->as_BitData()->null_seen(); 3168 } 3169 speculating = false; 3170 return false; 3171 } 3172 3173 void GraphKit::guard_klass_being_initialized(Node* klass) { 3174 int init_state_off = in_bytes(InstanceKlass::init_state_offset()); 3175 Node* adr = basic_plus_adr(top(), klass, init_state_off); 3176 Node* init_state = LoadNode::make(_gvn, nullptr, immutable_memory(), adr, 3177 adr->bottom_type()->is_ptr(), TypeInt::BYTE, 3178 T_BYTE, MemNode::unordered); 3179 init_state = _gvn.transform(init_state); 3180 3181 Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized)); 3182 3183 Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state)); 3184 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq)); 3185 3186 { BuildCutout unless(this, tst, PROB_MAX); 3187 uncommon_trap(Deoptimization::Reason_initialized, Deoptimization::Action_reinterpret); 3188 } 3189 } 3190 3191 void GraphKit::guard_init_thread(Node* klass) { 3192 int init_thread_off = in_bytes(InstanceKlass::init_thread_offset()); 3193 Node* adr = basic_plus_adr(top(), klass, init_thread_off); 3194 3195 Node* init_thread = LoadNode::make(_gvn, nullptr, immutable_memory(), adr, 3196 adr->bottom_type()->is_ptr(), TypePtr::NOTNULL, 3197 T_ADDRESS, MemNode::unordered); 3198 init_thread = _gvn.transform(init_thread); 3199 3200 Node* cur_thread = _gvn.transform(new ThreadLocalNode()); 3201 3202 Node* chk = _gvn.transform(new CmpPNode(cur_thread, init_thread)); 3203 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq)); 3204 3205 { BuildCutout unless(this, tst, PROB_MAX); 3206 uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_none); 3207 } 3208 } 3209 3210 void GraphKit::clinit_barrier(ciInstanceKlass* ik, ciMethod* context) { 3211 if (ik->is_being_initialized()) { 3212 if (C->needs_clinit_barrier(ik, context)) { 3213 Node* klass = makecon(TypeKlassPtr::make(ik)); 3214 guard_klass_being_initialized(klass); 3215 guard_init_thread(klass); 3216 insert_mem_bar(Op_MemBarCPUOrder); 3217 } 3218 } else if (ik->is_initialized()) { 3219 return; // no barrier needed 3220 } else { 3221 uncommon_trap(Deoptimization::Reason_uninitialized, 3222 Deoptimization::Action_reinterpret, 3223 nullptr); 3224 } 3225 } 3226 3227 //------------------------maybe_cast_profiled_receiver------------------------- 3228 // If the profile has seen exactly one type, narrow to exactly that type. 3229 // Subsequent type checks will always fold up. 3230 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, 3231 const TypeKlassPtr* require_klass, 3232 ciKlass* spec_klass, 3233 bool safe_for_replace) { 3234 if (!UseTypeProfile || !TypeProfileCasts) return nullptr; 3235 3236 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr); 3237 3238 // Make sure we haven't already deoptimized from this tactic. 3239 if (too_many_traps_or_recompiles(reason)) 3240 return nullptr; 3241 3242 // (No, this isn't a call, but it's enough like a virtual call 3243 // to use the same ciMethod accessor to get the profile info...) 
3244 // If we have a speculative type use it instead of profiling (which 3245 // may not help us) 3246 ciKlass* exact_kls = spec_klass; 3247 if (exact_kls == nullptr) { 3248 if (java_bc() == Bytecodes::_aastore) { 3249 ciKlass* array_type = nullptr; 3250 ciKlass* element_type = nullptr; 3251 ProfilePtrKind element_ptr = ProfileMaybeNull; 3252 bool flat_array = true; 3253 bool null_free_array = true; 3254 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array); 3255 exact_kls = element_type; 3256 } else { 3257 exact_kls = profile_has_unique_klass(); 3258 } 3259 } 3260 if (exact_kls != nullptr) { // no cast failures here 3261 if (require_klass == nullptr || 3262 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) { 3263 // If we narrow the type to match what the type profile sees or 3264 // the speculative type, we can then remove the rest of the 3265 // cast. 3266 // This is a win, even if the exact_kls is very specific, 3267 // because downstream operations, such as method calls, 3268 // will often benefit from the sharper type. 3269 Node* exact_obj = not_null_obj; // will get updated in place... 3270 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, 3271 &exact_obj); 3272 { PreserveJVMState pjvms(this); 3273 set_control(slow_ctl); 3274 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile); 3275 } 3276 if (safe_for_replace) { 3277 replace_in_map(not_null_obj, exact_obj); 3278 } 3279 return exact_obj; 3280 } 3281 // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us. 3282 } 3283 3284 return nullptr; 3285 } 3286 3287 /** 3288 * Cast obj to type and emit guard unless we had too many traps here 3289 * already 3290 * 3291 * @param obj node being cast 3292 * @param type type to cast the node to 3293 * @param not_null true if we know node cannot be null 3294 */ 3295 Node* GraphKit::maybe_cast_profiled_obj(Node* obj, 3296 ciKlass* type, 3297 bool not_null) { 3298 if (stopped()) { 3299 return obj; 3300 } 3301 3302 // type is null if profiling tells us this object is always null 3303 if (type != nullptr) { 3304 Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check; 3305 Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check; 3306 3307 if (!too_many_traps_or_recompiles(null_reason) && 3308 !too_many_traps_or_recompiles(class_reason)) { 3309 Node* not_null_obj = nullptr; 3310 // not_null is true if we know the object is not null and 3311 // there's no need for a null check 3312 if (!not_null) { 3313 Node* null_ctl = top(); 3314 not_null_obj = null_check_oop(obj, &null_ctl, true, true, true); 3315 assert(null_ctl->is_top(), "no null control here"); 3316 } else { 3317 not_null_obj = obj; 3318 } 3319 3320 Node* exact_obj = not_null_obj; 3321 ciKlass* exact_kls = type; 3322 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, 3323 &exact_obj); 3324 { 3325 PreserveJVMState pjvms(this); 3326 set_control(slow_ctl); 3327 uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile); 3328 } 3329 replace_in_map(not_null_obj, exact_obj); 3330 obj = exact_obj; 3331 } 3332 } else { 3333 if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) { 3334 Node* exact_obj = null_assert(obj); 3335 replace_in_map(obj, exact_obj); 3336 obj = exact_obj; 3337 } 3338 } 3339 return obj; 3340 } 3341 3342
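// A minimal usage sketch (hypothetical; 'kit', 'recv' and 'profiled_kls' are
// placeholder names, not used elsewhere in this file) showing how a caller
// typically applies the speculative cast above before a type-dependent
// operation:
//
//   GraphKit& kit = ...;            // current parsing context
//   Node* recv = ...;               // object with an imprecise static type
//   ciKlass* profiled_kls = ...;    // unique type seen by profiling, or null
//   Node* cast = kit.maybe_cast_profiled_obj(recv, profiled_kls,
//                                            /*not_null=*/ false);
//   // 'cast' carries the exact profiled type, guarded by an uncommon trap;
//   // if speculation was rejected (too many traps or recompiles at this
//   // site), it is simply 'recv' unchanged.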
//-------------------------------gen_instanceof-------------------------------- 3343 // Generate an instance-of idiom. Used by both the instance-of bytecode 3344 // and the reflective instance-of call. 3345 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) { 3346 kill_dead_locals(); // Benefit all the uncommon traps 3347 assert( !stopped(), "dead parse path should be checked in callers" ); 3348 assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()), 3349 "must check for not-null not-dead klass in callers"); 3350 3351 // Make the merge point 3352 enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT }; 3353 RegionNode* region = new RegionNode(PATH_LIMIT); 3354 Node* phi = new PhiNode(region, TypeInt::BOOL); 3355 C->set_has_split_ifs(true); // Has chance for split-if optimization 3356 3357 ciProfileData* data = nullptr; 3358 if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode 3359 data = method()->method_data()->bci_to_data(bci()); 3360 } 3361 bool speculative_not_null = false; 3362 bool never_see_null = (ProfileDynamicTypes // aggressive use of profile 3363 && seems_never_null(obj, data, speculative_not_null)); 3364 3365 // Null check; get the cast pointer; set region slot 3 3366 Node* null_ctl = top(); 3367 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); 3368 3369 // If not_null_obj is dead, only null-path is taken 3370 if (stopped()) { // Doing instance-of on a null? 3371 set_control(null_ctl); 3372 return intcon(0); 3373 } 3374 region->init_req(_null_path, null_ctl); 3375 phi ->init_req(_null_path, intcon(0)); // Set null path value 3376 if (null_ctl == top()) { 3377 // Do this eagerly, so that pattern matches like is_diamond_phi 3378 // will work even during parsing. 3379 assert(_null_path == PATH_LIMIT-1, "delete last"); 3380 region->del_req(_null_path); 3381 phi ->del_req(_null_path); 3382 } 3383 3384 // Do we know the type check always succeeds? 3385 bool known_statically = false; 3386 if (_gvn.type(superklass)->singleton()) { 3387 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr(); 3388 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type(); 3389 if (subk != nullptr && subk->is_loaded()) { 3390 int static_res = C->static_subtype_check(superk, subk); 3391 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false); 3392 } 3393 } 3394 3395 if (!known_statically) { 3396 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 3397 // We may not have profiling here or it may not help us. If we 3398 // have a speculative type use it to perform an exact cast. 3399 ciKlass* spec_obj_type = obj_type->speculative_type(); 3400 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) { 3401 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace); 3402 if (stopped()) { // Profile disagrees with this path. 3403 set_control(null_ctl); // Null is the only remaining possibility. 3404 return intcon(0); 3405 } 3406 if (cast_obj != nullptr) { 3407 not_null_obj = cast_obj; 3408 } 3409 } 3410 } 3411 3412 // Generate the subtype check 3413 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass); 3414 3415 // Plug in the success path to the general merge in slot 1. 3416 region->init_req(_obj_path, control()); 3417 phi ->init_req(_obj_path, intcon(1)); 3418 3419 // Plug in the failing path to the general merge in slot 2.
3420 region->init_req(_fail_path, not_subtype_ctrl); 3421 phi ->init_req(_fail_path, intcon(0)); 3422 3423 // Return final merged results 3424 set_control( _gvn.transform(region) ); 3425 record_for_igvn(region); 3426 3427 // If we know the type check always succeeds then we don't use the 3428 // profiling data at this bytecode. Don't lose it, feed it to the 3429 // type system as a speculative type. 3430 if (safe_for_replace) { 3431 Node* casted_obj = record_profiled_receiver_for_speculation(obj); 3432 replace_in_map(obj, casted_obj); 3433 } 3434 3435 return _gvn.transform(phi); 3436 } 3437 3438 //-------------------------------gen_checkcast--------------------------------- 3439 // Generate a checkcast idiom. Used by both the checkcast bytecode and the 3440 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the 3441 // uncommon-trap paths work. Adjust stack after this call. 3442 // If failure_control is supplied and not null, it is filled in with 3443 // the control edge for the cast failure. Otherwise, an appropriate 3444 // uncommon trap or exception is thrown. 3445 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) { 3446 kill_dead_locals(); // Benefit all the uncommon traps 3447 const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr(); 3448 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve(); 3449 const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type(); 3450 bool safe_for_replace = (failure_control == nullptr); 3451 assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer"); 3452 3453 // Fast cutout: Check the case that the cast is vacuously true. 3454 // This detects the common cases where the test will short-circuit 3455 // away completely. We do this before we perform the null check, 3456 // because if the test is going to turn into zero code, we don't 3457 // want a residual null check left around. (Causes a slowdown, 3458 // for example, in some objArray manipulations, such as a[i]=a[j].) 3459 if (improved_klass_ptr_type->singleton()) { 3460 const TypeKlassPtr* kptr = nullptr; 3461 const Type* t = _gvn.type(obj); 3462 if (t->isa_oop_ptr()) { 3463 kptr = t->is_oopptr()->as_klass_type(); 3464 } else if (obj->is_InlineType()) { 3465 ciInlineKlass* vk = t->inline_klass(); 3466 kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0)); 3467 } 3468 if (kptr != nullptr) { 3469 switch (C->static_subtype_check(improved_klass_ptr_type, kptr)) { 3470 case Compile::SSC_always_true: 3471 // If we know the type check always succeed then we don't use 3472 // the profiling data at this bytecode. Don't lose it, feed it 3473 // to the type system as a speculative type. 3474 obj = record_profiled_receiver_for_speculation(obj); 3475 if (null_free) { 3476 assert(safe_for_replace, "must be"); 3477 obj = null_check(obj); 3478 } 3479 assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized"); 3480 return obj; 3481 case Compile::SSC_always_false: 3482 if (null_free) { 3483 assert(safe_for_replace, "must be"); 3484 obj = null_check(obj); 3485 } 3486 // It needs a null check because a null will *pass* the cast check. 3487 if (t->isa_oopptr() != nullptr && !t->is_oopptr()->maybe_null()) { 3488 bool is_aastore = (java_bc() == Bytecodes::_aastore); 3489 Deoptimization::DeoptReason reason = is_aastore ? 
3490 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check; 3491 builtin_throw(reason); 3492 return top(); 3493 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) { 3494 return null_assert(obj); 3495 } 3496 break; // Fall through to full check 3497 default: 3498 break; 3499 } 3500 } 3501 } 3502 3503 ciProfileData* data = nullptr; 3504 if (failure_control == nullptr) { // use MDO in regular case only 3505 assert(java_bc() == Bytecodes::_aastore || 3506 java_bc() == Bytecodes::_checkcast, 3507 "interpreter profiles type checks only for these BCs"); 3508 if (method()->method_data()->is_mature()) { 3509 data = method()->method_data()->bci_to_data(bci()); 3510 } 3511 } 3512 3513 // Make the merge point 3514 enum { _obj_path = 1, _null_path, PATH_LIMIT }; 3515 RegionNode* region = new RegionNode(PATH_LIMIT); 3516 Node* phi = new PhiNode(region, toop); 3517 _gvn.set_type(region, Type::CONTROL); 3518 _gvn.set_type(phi, toop); 3519 3520 C->set_has_split_ifs(true); // Has chance for split-if optimization 3521 3522 // Use null-cast information if it is available 3523 bool speculative_not_null = false; 3524 bool never_see_null = ((failure_control == nullptr) // regular case only 3525 && seems_never_null(obj, data, speculative_not_null)); 3526 3527 if (obj->is_InlineType()) { 3528 // Re-execute if buffering triggers deoptimization 3529 PreserveReexecuteState preexecs(this); 3530 jvms()->set_should_reexecute(true); 3531 obj = obj->as_InlineType()->buffer(this, safe_for_replace); 3532 } 3533 3534 // Null check; get the cast pointer; set region slot 2 3535 Node* null_ctl = top(); 3536 Node* not_null_obj = nullptr; 3537 if (null_free) { 3538 assert(safe_for_replace, "must be"); 3539 not_null_obj = null_check(obj); 3540 } else { 3541 not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); 3542 } 3543 3544 // If not_null_obj is dead, only null-path is taken 3545 if (stopped()) { // Doing checkcast on a null? 3546 set_control(null_ctl); 3547 if (toop->is_inlinetypeptr()) { 3548 return InlineTypeNode::make_null(_gvn, toop->inline_klass()); 3549 } 3550 return null(); 3551 } 3552 region->init_req(_null_path, null_ctl); 3553 phi ->init_req(_null_path, null()); // Set null path value 3554 if (null_ctl == top()) { 3555 // Do this eagerly, so that pattern matches like is_diamond_phi 3556 // will work even during parsing. 3557 assert(_null_path == PATH_LIMIT-1, "delete last"); 3558 region->del_req(_null_path); 3559 phi ->del_req(_null_path); 3560 } 3561 3562 Node* cast_obj = nullptr; 3563 if (improved_klass_ptr_type->klass_is_exact()) { 3564 // The following optimization tries to statically cast the speculative type of the object 3565 // (for example obtained during profiling) to the type of the superklass and then do a 3566 // dynamic check that the type of the object is what we expect. To work correctly 3567 // for checkcast and aastore the type of superklass should be exact. 3568 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 3569 // We may not have profiling here or it may not help us. If we have 3570 // a speculative type use it to perform an exact cast.
3571 ciKlass* spec_obj_type = obj_type->speculative_type(); 3572 if (spec_obj_type != nullptr || data != nullptr) { 3573 cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace); 3574 if (cast_obj != nullptr) { 3575 if (failure_control != nullptr) // failure is now impossible 3576 (*failure_control) = top(); 3577 // adjust the type of the phi to the exact klass: 3578 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR)); 3579 } 3580 } 3581 } 3582 3583 if (cast_obj == nullptr) { 3584 // Generate the subtype check 3585 Node* improved_superklass = superklass; 3586 if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) { 3587 // Only improve the super class for constants which allows subsequent sub type checks to possibly be commoned up. 3588 // The other non-constant cases cannot be improved with a cast node here since they could be folded to top. 3589 // Additionally, the benefit would only be minor in non-constant cases. 3590 improved_superklass = makecon(improved_klass_ptr_type); 3591 } 3592 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass); 3593 // Plug in success path into the merge 3594 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); 3595 // Failure path ends in uncommon trap (or may be dead - failure impossible) 3596 if (failure_control == nullptr) { 3597 if (not_subtype_ctrl != top()) { // If failure is possible 3598 PreserveJVMState pjvms(this); 3599 set_control(not_subtype_ctrl); 3600 Node* obj_klass = nullptr; 3601 if (not_null_obj->is_InlineType()) { 3602 obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass())); 3603 } else { 3604 obj_klass = load_object_klass(not_null_obj); 3605 } 3606 bool is_aastore = (java_bc() == Bytecodes::_aastore); 3607 Deoptimization::DeoptReason reason = is_aastore ? 3608 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check; 3609 builtin_throw(reason); 3610 } 3611 } else { 3612 (*failure_control) = not_subtype_ctrl; 3613 } 3614 } 3615 3616 region->init_req(_obj_path, control()); 3617 phi ->init_req(_obj_path, cast_obj); 3618 3619 // A merge of null or Casted-NotNull obj 3620 Node* res = _gvn.transform(phi); 3621 3622 // Note I do NOT always 'replace_in_map(obj,result)' here. 3623 // if( tk->klass()->can_be_primary_super() ) 3624 // This means that if I successfully store an Object into an array-of-String 3625 // I 'forget' that the Object is really now known to be a String. I have to 3626 // do this because we don't have true union types for interfaces - if I store 3627 // a Baz into an array-of-Interface and then tell the optimizer it's an 3628 // Interface, I forget that it's also a Baz and cannot do Baz-like field 3629 // references to it. FIX THIS WHEN UNION TYPES APPEAR! 3630 // replace_in_map( obj, res ); 3631 3632 // Return final merged results 3633 set_control( _gvn.transform(region) ); 3634 record_for_igvn(region); 3635 3636 bool not_inline = !toop->can_be_inline_type(); 3637 bool not_flat_in_array = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array()); 3638 if (EnableValhalla && not_flat_in_array) { 3639 // Check if obj has been loaded from an array 3640 obj = obj->isa_DecodeN() ? 
obj->in(1) : obj; 3641 Node* array = nullptr; 3642 if (obj->isa_Load()) { 3643 Node* address = obj->in(MemNode::Address); 3644 if (address->isa_AddP()) { 3645 array = address->as_AddP()->in(AddPNode::Base); 3646 } 3647 } else if (obj->is_Phi()) { 3648 Node* region = obj->in(0); 3649 // TODO make this more robust (see JDK-8231346) 3650 if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) { 3651 IfNode* iff = region->in(2)->in(0)->isa_If(); 3652 if (iff != nullptr) { 3653 iff->is_flat_array_check(&_gvn, &array); 3654 } 3655 } 3656 } 3657 if (array != nullptr) { 3658 const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr(); 3659 if (ary_t != nullptr && !ary_t->is_flat()) { 3660 if (!ary_t->is_not_null_free() && not_inline) { 3661 // Casting array element to a non-inline-type, mark array as not null-free. 3662 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free())); 3663 replace_in_map(array, cast); 3664 } else if (!ary_t->is_not_flat()) { 3665 // Casting array element to a non-flat type, mark array as not flat. 3666 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat())); 3667 replace_in_map(array, cast); 3668 } 3669 } 3670 } 3671 } 3672 3673 if (!stopped() && !res->is_InlineType()) { 3674 res = record_profiled_receiver_for_speculation(res); 3675 if (toop->is_inlinetypeptr()) { 3676 Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null()); 3677 res = vt; 3678 if (safe_for_replace) { 3679 replace_in_map(obj, vt); 3680 replace_in_map(not_null_obj, vt); 3681 replace_in_map(res, vt); 3682 } 3683 } 3684 } 3685 return res; 3686 } 3687 3688 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) { 3689 Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); 3690 Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); 3691 Node* mask = MakeConX(markWord::inline_type_pattern); 3692 Node* masked = _gvn.transform(new AndXNode(mark, mask)); 3693 Node* cmp = _gvn.transform(new CmpXNode(masked, mask)); 3694 return _gvn.transform(new BoolNode(cmp, is_inline ? BoolTest::eq : BoolTest::ne)); 3695 } 3696 3697 Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) { 3698 Node* lh_adr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset())); 3699 // Make sure to use immutable memory here to enable hoisting the check out of loops 3700 Node* lh_val = _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), lh_adr, lh_adr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered)); 3701 Node* masked = _gvn.transform(new AndINode(lh_val, intcon(mask))); 3702 Node* cmp = _gvn.transform(new CmpINode(masked, intcon(val))); 3703 return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne)); 3704 } 3705 3706 // TODO 8325106 With JEP 401, flatness is not a property of the Class anymore. 3707 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) { 3708 // We can't use immutable memory here because the mark word is mutable. 3709 // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the 3710 // check is moved out of loops (mainly to enable loop unswitching). 3711 Node* mem = UseArrayMarkWordCheck ? 
memory(Compile::AliasIdxRaw) : immutable_memory(); 3712 Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, mem, array_or_klass)); 3713 record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN 3714 return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne)); 3715 } 3716 3717 Node* GraphKit::null_free_array_test(Node* klass, bool null_free) { 3718 return array_lh_test(klass, Klass::_lh_null_free_array_bit_inplace, 0, !null_free); 3719 } 3720 3721 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null 3722 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) { 3723 RegionNode* region = new RegionNode(3); 3724 Node* null_ctl = top(); 3725 null_check_oop(val, &null_ctl); 3726 if (null_ctl != top()) { 3727 PreserveJVMState pjvms(this); 3728 set_control(null_ctl); 3729 { 3730 // Deoptimize if null-free array 3731 BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX); 3732 inc_sp(nargs); 3733 uncommon_trap(Deoptimization::Reason_null_check, 3734 Deoptimization::Action_none); 3735 } 3736 region->init_req(1, control()); 3737 } 3738 region->init_req(2, control()); 3739 set_control(_gvn.transform(region)); 3740 record_for_igvn(region); 3741 if (_gvn.type(val) == TypePtr::NULL_PTR) { 3742 // Since we were just successfully storing null, the array can't be null free. 3743 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); 3744 ary_t = ary_t->cast_to_not_null_free(); 3745 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t)); 3746 if (safe_for_replace) { 3747 replace_in_map(ary, cast); 3748 } 3749 ary = cast; 3750 } 3751 return ary; 3752 } 3753 3754 //------------------------------next_monitor----------------------------------- 3755 // What number should be given to the next monitor? 3756 int GraphKit::next_monitor() { 3757 int current = jvms()->monitor_depth()* C->sync_stack_slots(); 3758 int next = current + C->sync_stack_slots(); 3759 // Keep the toplevel high water mark current: 3760 if (C->fixed_slots() < next) C->set_fixed_slots(next); 3761 return current; 3762 } 3763 3764 //------------------------------insert_mem_bar--------------------------------- 3765 // Memory barrier to avoid floating things around 3766 // The membar serves as a pinch point between both control and all memory slices. 3767 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) { 3768 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent); 3769 mb->init_req(TypeFunc::Control, control()); 3770 mb->init_req(TypeFunc::Memory, reset_memory()); 3771 Node* membar = _gvn.transform(mb); 3772 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control))); 3773 set_all_memory_call(membar); 3774 return membar; 3775 } 3776 3777 //-------------------------insert_mem_bar_volatile---------------------------- 3778 // Memory barrier to avoid floating things around 3779 // The membar serves as a pinch point between both control and memory(alias_idx). 3780 // If you want to make a pinch point on all memory slices, do not use this 3781 // function (even with AliasIdxBot); use insert_mem_bar() instead. 3782 Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) { 3783 // When Parse::do_put_xxx updates a volatile field, it appends a series 3784 // of MemBarVolatile nodes, one for *each* volatile field alias category. 3785 // The first membar is on the same memory slice as the field store opcode. 
3786 // This forces the membar to follow the store. (Bug 6500685 broke this.) 3787 // All the other membars (for other volatile slices, including AliasIdxBot, 3788 // which stands for all unknown volatile slices) are control-dependent 3789 // on the first membar. This prevents later volatile loads or stores 3790 // from sliding up past the just-emitted store. 3791 3792 MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent); 3793 mb->set_req(TypeFunc::Control,control()); 3794 if (alias_idx == Compile::AliasIdxBot) { 3795 mb->set_req(TypeFunc::Memory, merged_memory()->base_memory()); 3796 } else { 3797 assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller"); 3798 mb->set_req(TypeFunc::Memory, memory(alias_idx)); 3799 } 3800 Node* membar = _gvn.transform(mb); 3801 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control))); 3802 if (alias_idx == Compile::AliasIdxBot) { 3803 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory))); 3804 } else { 3805 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx); 3806 } 3807 return membar; 3808 } 3809 3810 //------------------------------shared_lock------------------------------------ 3811 // Emit locking code. 3812 FastLockNode* GraphKit::shared_lock(Node* obj) { 3813 // bci is either a monitorenter bc or InvocationEntryBci 3814 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces 3815 assert(SynchronizationEntryBCI == InvocationEntryBci, ""); 3816 3817 if( !GenerateSynchronizationCode ) 3818 return nullptr; // Not locking things? 3819 3820 if (stopped()) // Dead monitor? 3821 return nullptr; 3822 3823 assert(dead_locals_are_killed(), "should kill locals before sync. point"); 3824 3825 // Box the stack location 3826 Node* box = _gvn.transform(new BoxLockNode(next_monitor())); 3827 Node* mem = reset_memory(); 3828 3829 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock(); 3830 3831 // Create the rtm counters for this fast lock if needed. 3832 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci 3833 3834 // Add monitor to debug info for the slow path. If we block inside the 3835 // slow path and de-opt, we need the monitor hanging around 3836 map()->push_monitor( flock ); 3837 3838 const TypeFunc *tf = LockNode::lock_type(); 3839 LockNode *lock = new LockNode(C, tf); 3840 3841 lock->init_req( TypeFunc::Control, control() ); 3842 lock->init_req( TypeFunc::Memory , mem ); 3843 lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o 3844 lock->init_req( TypeFunc::FramePtr, frameptr() ); 3845 lock->init_req( TypeFunc::ReturnAdr, top() ); 3846 3847 lock->init_req(TypeFunc::Parms + 0, obj); 3848 lock->init_req(TypeFunc::Parms + 1, box); 3849 lock->init_req(TypeFunc::Parms + 2, flock); 3850 add_safepoint_edges(lock); 3851 3852 lock = _gvn.transform( lock )->as_Lock(); 3853 3854 // lock has no side-effects, sets few values 3855 set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM); 3856 3857 insert_mem_bar(Op_MemBarAcquireLock); 3858 3859 // Add this to the worklist so that the lock can be eliminated 3860 record_for_igvn(lock); 3861 3862 #ifndef PRODUCT 3863 if (PrintLockStatistics) { 3864 // Update the counter for this lock. Don't bother using an atomic 3865 // operation since we don't require absolute accuracy. 
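// (The counter update that follows is a plain, non-atomic load/add/store of
// the counter word, so concurrent increments may occasionally be lost; that
// is acceptable for a statistics-only counter.)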
3866 lock->create_lock_counter(map()->jvms()); 3867 increment_counter(lock->counter()->addr()); 3868 } 3869 #endif 3870 3871 return flock; 3872 } 3873 3874 3875 //------------------------------shared_unlock---------------------------------- 3876 // Emit unlocking code. 3877 void GraphKit::shared_unlock(Node* box, Node* obj) { 3878 // bci is either a monitorenter bc or InvocationEntryBci 3879 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces 3880 assert(SynchronizationEntryBCI == InvocationEntryBci, ""); 3881 3882 if( !GenerateSynchronizationCode ) 3883 return; 3884 if (stopped()) { // Dead monitor? 3885 map()->pop_monitor(); // Kill monitor from debug info 3886 return; 3887 } 3888 assert(!obj->is_InlineType(), "should not unlock on inline type"); 3889 3890 // Memory barrier to avoid floating things down past the locked region 3891 insert_mem_bar(Op_MemBarReleaseLock); 3892 3893 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type(); 3894 UnlockNode *unlock = new UnlockNode(C, tf); 3895 #ifdef ASSERT 3896 unlock->set_dbg_jvms(sync_jvms()); 3897 #endif 3898 uint raw_idx = Compile::AliasIdxRaw; 3899 unlock->init_req( TypeFunc::Control, control() ); 3900 unlock->init_req( TypeFunc::Memory , memory(raw_idx) ); 3901 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o 3902 unlock->init_req( TypeFunc::FramePtr, frameptr() ); 3903 unlock->init_req( TypeFunc::ReturnAdr, top() ); 3904 3905 unlock->init_req(TypeFunc::Parms + 0, obj); 3906 unlock->init_req(TypeFunc::Parms + 1, box); 3907 unlock = _gvn.transform(unlock)->as_Unlock(); 3908 3909 Node* mem = reset_memory(); 3910 3911 // unlock has no side-effects, sets few values 3912 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM); 3913 3914 // Kill monitor from debug info 3915 map()->pop_monitor( ); 3916 } 3917 3918 //-------------------------------get_layout_helper----------------------------- 3919 // If the given klass is a constant or known to be an array, 3920 // fetch the constant layout helper value into constant_value 3921 // and return null. Otherwise, load the non-constant 3922 // layout helper value, and return the node which represents it. 3923 // This two-faced routine is useful because allocation sites 3924 // almost always feature constant types. 3925 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { 3926 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr(); 3927 if (!StressReflectiveCode && klass_t != nullptr) { 3928 bool xklass = klass_t->klass_is_exact(); 3929 bool can_be_flat = false; 3930 const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr(); 3931 if (UseFlatArray && !xklass && ary_type != nullptr && !ary_type->is_null_free()) { 3932 // TODO 8325106 Fix comment 3933 // The runtime type of [LMyValue might be [QMyValue due to [QMyValue <: [LMyValue. Don't constant fold. 
3934 const TypeOopPtr* elem = ary_type->elem()->make_oopptr(); 3935 can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array()); 3936 } 3937 if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) { 3938 jint lhelper; 3939 if (klass_t->is_flat()) { 3940 lhelper = ary_type->flat_layout_helper(); 3941 } else if (klass_t->isa_aryklassptr()) { 3942 BasicType elem = ary_type->elem()->array_element_basic_type(); 3943 if (is_reference_type(elem, true)) { 3944 elem = T_OBJECT; 3945 } 3946 lhelper = Klass::array_layout_helper(elem); 3947 } else { 3948 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper(); 3949 } 3950 if (lhelper != Klass::_lh_neutral_value) { 3951 constant_value = lhelper; 3952 return (Node*) nullptr; 3953 } 3954 } 3955 } 3956 constant_value = Klass::_lh_neutral_value; // put in a known value 3957 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset())); 3958 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered); 3959 } 3960 3961 // We just put in an allocate/initialize with a big raw-memory effect. 3962 // Hook selected additional alias categories on the initialization. 3963 static void hook_memory_on_init(GraphKit& kit, int alias_idx, 3964 MergeMemNode* init_in_merge, 3965 Node* init_out_raw) { 3966 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory()); 3967 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, ""); 3968 3969 Node* prevmem = kit.memory(alias_idx); 3970 init_in_merge->set_memory_at(alias_idx, prevmem); 3971 if (init_out_raw != nullptr) { 3972 kit.set_memory(init_out_raw, alias_idx); 3973 } 3974 } 3975 3976 //---------------------------set_output_for_allocation------------------------- 3977 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, 3978 const TypeOopPtr* oop_type, 3979 bool deoptimize_on_exception) { 3980 int rawidx = Compile::AliasIdxRaw; 3981 alloc->set_req( TypeFunc::FramePtr, frameptr() ); 3982 add_safepoint_edges(alloc); 3983 Node* allocx = _gvn.transform(alloc); 3984 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) ); 3985 // create memory projection for i_o 3986 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx ); 3987 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception); 3988 3989 // create a memory projection as for the normal control path 3990 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory)); 3991 set_memory(malloc, rawidx); 3992 3993 // a normal slow-call doesn't change i_o, but an allocation does 3994 // we create a separate i_o projection for the normal control path 3995 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) ); 3996 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) ); 3997 3998 // put in an initialization barrier 3999 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx, 4000 rawoop)->as_Initialize(); 4001 assert(alloc->initialization() == init, "2-way macro link must work"); 4002 assert(init ->allocation() == alloc, "2-way macro link must work"); 4003 { 4004 // Extract memory strands which may participate in the new object's 4005 // initialization, and source them from the new InitializeNode. 4006 // This will allow us to observe initializations when they occur, 4007 // and link them properly (as a group) to the InitializeNode. 
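// For example (hypothetical): for 'new Point()' with instance fields x and
// y, the per-field loop below reroutes the Point.x and Point.y alias slices
// through this InitializeNode, so the initializing stores to those fields
// are captured by the allocation instead of floating on the general memory
// graph.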
4008 assert(init->in(InitializeNode::Memory) == malloc, ""); 4009 MergeMemNode* minit_in = MergeMemNode::make(malloc); 4010 init->set_req(InitializeNode::Memory, minit_in); 4011 record_for_igvn(minit_in); // fold it up later, if possible 4012 _gvn.set_type(minit_in, Type::MEMORY); 4013 Node* minit_out = memory(rawidx); 4014 assert(minit_out->is_Proj() && minit_out->in(0) == init, ""); 4015 // Add an edge in the MergeMem for the header fields so an access 4016 // to one of those has correct memory state 4017 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()))); 4018 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()))); 4019 if (oop_type->isa_aryptr()) { 4020 const TypeAryPtr* arytype = oop_type->is_aryptr(); 4021 if (arytype->is_flat()) { 4022 // Initially all flat array accesses share a single slice 4023 // but that changes after parsing. Prepare the memory graph so 4024 // it can optimize flat array accesses properly once they 4025 // don't share a single slice. 4026 assert(C->flat_accesses_share_alias(), "should be set at parse time"); 4027 C->set_flat_accesses_share_alias(false); 4028 ciInlineKlass* vk = arytype->elem()->inline_klass(); 4029 for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) { 4030 ciField* field = vk->nonstatic_field_at(i); 4031 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize) 4032 continue; // do not bother to track really large numbers of fields 4033 int off_in_vt = field->offset_in_bytes() - vk->first_field_offset(); 4034 const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot); 4035 int fieldidx = C->get_alias_index(adr_type, true); 4036 // Pass nullptr for init_out. Having per flat array element field memory edges as uses of the Initialize node 4037 // can result in per flat array field Phis being created, which confuses the logic of 4038 // Compile::adjust_flat_array_access_aliases(). 4039 hook_memory_on_init(*this, fieldidx, minit_in, nullptr); 4040 } 4041 C->set_flat_accesses_share_alias(true); 4042 hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out); 4043 } else { 4044 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); 4045 int elemidx = C->get_alias_index(telemref); 4046 hook_memory_on_init(*this, elemidx, minit_in, minit_out); 4047 } 4048 } else if (oop_type->isa_instptr()) { 4049 set_memory(minit_out, C->get_alias_index(oop_type)); // mark word 4050 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass(); 4051 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) { 4052 ciField* field = ik->nonstatic_field_at(i); 4053 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize) 4054 continue; // do not bother to track really large numbers of fields 4055 // Find (or create) the alias category for this field: 4056 int fieldidx = C->alias_type(field)->index(); 4057 hook_memory_on_init(*this, fieldidx, minit_in, minit_out); 4058 } 4059 } 4060 } 4061 4062 // Cast raw oop to the real thing...
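// (The CheckCastPP below pins the raw, just-allocated pointer to its
// Java-level oop type under the allocation's control, so downstream users
// see a properly typed, not-null oop rather than a TypeRawPtr value.)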
4063 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type); 4064 javaoop = _gvn.transform(javaoop); 4065 C->set_recent_alloc(control(), javaoop); 4066 assert(just_allocated_object(control()) == javaoop, "just allocated"); 4067 4068 #ifdef ASSERT 4069 { // Verify that the AllocateNode::Ideal_allocation recognizers work: 4070 assert(AllocateNode::Ideal_allocation(rawoop) == alloc, 4071 "Ideal_allocation works"); 4072 assert(AllocateNode::Ideal_allocation(javaoop) == alloc, 4073 "Ideal_allocation works"); 4074 if (alloc->is_AllocateArray()) { 4075 assert(AllocateArrayNode::Ideal_array_allocation(rawoop) == alloc->as_AllocateArray(), 4076 "Ideal_allocation works"); 4077 assert(AllocateArrayNode::Ideal_array_allocation(javaoop) == alloc->as_AllocateArray(), 4078 "Ideal_allocation works"); 4079 } else { 4080 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please"); 4081 } 4082 } 4083 #endif //ASSERT 4084 4085 return javaoop; 4086 } 4087 4088 //---------------------------new_instance-------------------------------------- 4089 // This routine takes a klass_node which may be constant (for a static type) 4090 // or may be non-constant (for reflective code). It will work equally well 4091 // for either, and the graph will fold nicely if the optimizer later reduces 4092 // the type to a constant. 4093 // The optional arguments are for specialized use by intrinsics: 4094 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path. 4095 // - If 'return_size_val', report the total object size to the caller. 4096 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize) 4097 Node* GraphKit::new_instance(Node* klass_node, 4098 Node* extra_slow_test, 4099 Node* *return_size_val, 4100 bool deoptimize_on_exception, 4101 InlineTypeNode* inline_type_node) { 4102 // Compute size in doublewords 4103 // The size is always an integral number of doublewords, represented 4104 // as a positive bytewise size stored in the klass's layout_helper. 4105 // The layout_helper also encodes (in a low bit) the need for a slow path. 4106 jint layout_con = Klass::_lh_neutral_value; 4107 Node* layout_val = get_layout_helper(klass_node, layout_con); 4108 bool layout_is_con = (layout_val == nullptr); 4109 4110 if (extra_slow_test == nullptr) extra_slow_test = intcon(0); 4111 // Generate the initial go-slow test. It's either ALWAYS (return a 4112 // Node for 1) or NEVER (return a null) or perhaps (in the reflective 4113 // case) a computed value derived from the layout_helper. 4114 Node* initial_slow_test = nullptr; 4115 if (layout_is_con) { 4116 assert(!StressReflectiveCode, "stress mode does not use these paths"); 4117 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con); 4118 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test; 4119 } else { // reflective case 4120 // This reflective path is used by Unsafe.allocateInstance. 4121 // (It may be stress-tested by specifying StressReflectiveCode.) 4122 // Basically, we want to get into the VM if there's an illegal argument. 4123 Node* bit = intcon(Klass::_lh_instance_slow_path_bit); 4124 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) ); 4125 if (extra_slow_test != intcon(0)) { 4126 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) ); 4127 } 4128 // (Macro-expander will further convert this to a Bool, if necessary.) 4129 } 4130 4131 // Find the size in bytes. This is easy; it's the layout_helper.
4132 // The size value must be valid even if the slow path is taken. 4133 Node* size = nullptr; 4134 if (layout_is_con) { 4135 size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con)); 4136 } else { // reflective case 4137 // This reflective path is used by clone and Unsafe.allocateInstance. 4138 size = ConvI2X(layout_val); 4139 4140 // Clear the low bits to extract layout_helper_size_in_bytes: 4141 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit"); 4142 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong)); 4143 size = _gvn.transform( new AndXNode(size, mask) ); 4144 } 4145 if (return_size_val != nullptr) { 4146 (*return_size_val) = size; 4147 } 4148 4149 // This is a precise notnull oop of the klass. 4150 // (Actually, it need not be precise if this is a reflective allocation.) 4151 // It's what we cast the result to. 4152 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr(); 4153 if (!tklass) tklass = TypeInstKlassPtr::OBJECT; 4154 const TypeOopPtr* oop_type = tklass->as_instance_type(); 4155 4156 // Now generate allocation code 4157 4158 // The entire memory state is needed for slow path of the allocation 4159 // since GC and deoptimization can happen. 4160 Node *mem = reset_memory(); 4161 set_all_memory(mem); // Create new memory state 4162 4163 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP), 4164 control(), mem, i_o(), 4165 size, klass_node, 4166 initial_slow_test, inline_type_node); 4167 4168 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception); 4169 } 4170 4171 //-------------------------------new_array------------------------------------- 4172 // helper for newarray and anewarray 4173 // The 'length' parameter is (obviously) the length of the array. 4174 // The optional arguments are for specialized use by intrinsics: 4175 // - If 'return_size_val', report the non-padded array size (sum of header size 4176 // and array body) to the caller. 4177 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize) 4178 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) 4179 Node* length, // number of array elements 4180 int nargs, // number of arguments to push back for uncommon trap 4181 Node* *return_size_val, 4182 bool deoptimize_on_exception) { 4183 jint layout_con = Klass::_lh_neutral_value; 4184 Node* layout_val = get_layout_helper(klass_node, layout_con); 4185 bool layout_is_con = (layout_val == nullptr); 4186 4187 if (!layout_is_con && !StressReflectiveCode && 4188 !too_many_traps(Deoptimization::Reason_class_check)) { 4189 // This is a reflective array creation site. 4190 // Optimistically assume that it is a subtype of Object[], 4191 // so that we can fold up all the address arithmetic. 4192 layout_con = Klass::array_layout_helper(T_OBJECT); 4193 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) ); 4194 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) ); 4195 { BuildCutout unless(this, bol_lh, PROB_MAX); 4196 inc_sp(nargs); 4197 uncommon_trap(Deoptimization::Reason_class_check, 4198 Deoptimization::Action_maybe_recompile); 4199 } 4200 layout_val = nullptr; 4201 layout_is_con = true; 4202 } 4203 4204 // Generate the initial go-slow test. Make sure we do not overflow 4205 // if length is huge (near 2Gig) or negative! We do not need 4206 // exact double-words here, just a close approximation of needed 4207 // double-words. 
We can't add any offset or rounding bits, lest we 4208 // take a size -1 of bytes and make it positive. Use an unsigned 4209 // compare, so negative sizes look hugely positive. 4210 int fast_size_limit = FastAllocateSizeLimit; 4211 if (layout_is_con) { 4212 assert(!StressReflectiveCode, "stress mode does not use these paths"); 4213 // Increase the size limit if we have exact knowledge of array type. 4214 int log2_esize = Klass::layout_helper_log2_element_size(layout_con); 4215 fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0); 4216 } 4217 4218 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) ); 4219 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) ); 4220 4221 // --- Size Computation --- 4222 // array_size = round_to_heap(array_header + (length << elem_shift)); 4223 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes) 4224 // and align_to(x, y) == ((x + y-1) & ~(y-1)) 4225 // The rounding mask is strength-reduced, if possible. 4226 int round_mask = MinObjAlignmentInBytes - 1; 4227 Node* header_size = nullptr; 4228 // (T_BYTE has the weakest alignment and size restrictions...) 4229 if (layout_is_con) { 4230 int hsize = Klass::layout_helper_header_size(layout_con); 4231 int eshift = Klass::layout_helper_log2_element_size(layout_con); 4232 bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con); 4233 if ((round_mask & ~right_n_bits(eshift)) == 0) 4234 round_mask = 0; // strength-reduce it if it goes away completely 4235 assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); 4236 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); 4237 assert(header_size_min <= hsize, "generic minimum is smallest"); 4238 header_size = intcon(hsize); 4239 } else { 4240 Node* hss = intcon(Klass::_lh_header_size_shift); 4241 Node* hsm = intcon(Klass::_lh_header_size_mask); 4242 header_size = _gvn.transform(new URShiftINode(layout_val, hss)); 4243 header_size = _gvn.transform(new AndINode(header_size, hsm)); 4244 } 4245 4246 Node* elem_shift = nullptr; 4247 if (layout_is_con) { 4248 int eshift = Klass::layout_helper_log2_element_size(layout_con); 4249 if (eshift != 0) 4250 elem_shift = intcon(eshift); 4251 } else { 4252 // There is no need to mask or shift this value. 4253 // The semantics of LShiftINode include an implicit mask to 0x1F. 4254 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place"); 4255 elem_shift = layout_val; 4256 } 4257 4258 // Transition to native address size for all offset calculations: 4259 Node* lengthx = ConvI2X(length); 4260 Node* headerx = ConvI2X(header_size); 4261 #ifdef _LP64 4262 { const TypeInt* tilen = _gvn.find_int_type(length); 4263 if (tilen != nullptr && tilen->_lo < 0) { 4264 // Add a manual constraint to a positive range. Cf. array_element_address. 4265 jint size_max = fast_size_limit; 4266 if (size_max > tilen->_hi) size_max = tilen->_hi; 4267 const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin); 4268 4269 // Only do a narrow I2L conversion if the range check passed. 4270 IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN); 4271 _gvn.transform(iff); 4272 RegionNode* region = new RegionNode(3); 4273 _gvn.set_type(region, Type::CONTROL); 4274 lengthx = new PhiNode(region, TypeLong::LONG); 4275 _gvn.set_type(lengthx, TypeLong::LONG); 4276 4277 // Range check passed. Use ConvI2L node with narrow type. 
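      // (Schematically, and as an assumption about the helper's shape, the
      // call to C->constrained_convI2L(&_gvn, length, tlcon, passed) below
      // builds CastII(length, [0..size_max]) pinned on 'passed', feeding a
      // ConvI2L, so the narrowed conversion cannot be hoisted above the check.)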
4278 Node* passed = IfFalse(iff); 4279 region->init_req(1, passed); 4280 // Make I2L conversion control dependent to prevent it from 4281 // floating above the range check during loop optimizations. 4282 lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed)); 4283 4284 // Range check failed. Use ConvI2L with wide type because length may be invalid. 4285 region->init_req(2, IfTrue(iff)); 4286 lengthx->init_req(2, ConvI2X(length)); 4287 4288 set_control(region); 4289 record_for_igvn(region); 4290 record_for_igvn(lengthx); 4291 } 4292 } 4293 #endif 4294 4295 // Combine header size and body size for the array copy part, then align (if 4296 // necessary) for the allocation part. This computation cannot overflow, 4297 // because it is used only in two places, one where the length is sharply 4298 // limited, and the other after a successful allocation. 4299 Node* abody = lengthx; 4300 if (elem_shift != nullptr) { 4301 abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift)); 4302 } 4303 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody)); 4304 4305 if (return_size_val != nullptr) { 4306 // This is the size 4307 (*return_size_val) = non_rounded_size; 4308 } 4309 4310 Node* size = non_rounded_size; 4311 if (round_mask != 0) { 4312 Node* mask1 = MakeConX(round_mask); 4313 size = _gvn.transform(new AddXNode(size, mask1)); 4314 Node* mask2 = MakeConX(~round_mask); 4315 size = _gvn.transform(new AndXNode(size, mask2)); 4316 } 4317 // else if round_mask == 0, the size computation is self-rounding 4318 4319 // Now generate allocation code 4320 4321 // The entire memory state is needed for slow path of the allocation 4322 // since GC and deoptimization can happen. 4323 Node *mem = reset_memory(); 4324 set_all_memory(mem); // Create new memory state 4325 4326 if (initial_slow_test->is_Bool()) { 4327 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick. 4328 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); 4329 } 4330 4331 const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr(); 4332 const TypeOopPtr* ary_type = ary_klass->as_instance_type(); 4333 const TypeAryPtr* ary_ptr = ary_type->isa_aryptr(); 4334 4335 // TODO 8325106 Fix comment 4336 // Inline type array variants: 4337 // - null-ok: MyValue.ref[] (ciObjArrayKlass "[LMyValue") 4338 // - null-free: MyValue.val[] (ciObjArrayKlass "[QMyValue") 4339 // - null-free, flat : MyValue.val[] (ciFlatArrayKlass "[QMyValue") 4340 // Check if array is a null-free, non-flat inline type array 4341 // that needs to be initialized with the default inline type. 
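  // (Example: a null-free, non-flat MyValue[] must start out with every
  // element referencing MyValue's canonical default instance instead of null.
  // The code below fetches that oop and, under compressed oops, duplicates the
  // narrow oop into both halves of one 64-bit raw word, presumably so the
  // array fill can store the init value a word at a time.)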
4342 Node* default_value = nullptr; 4343 Node* raw_default_value = nullptr; 4344 if (ary_ptr != nullptr && ary_ptr->klass_is_exact()) { 4345 // Array type is known 4346 if (ary_ptr->is_null_free() && !ary_ptr->is_flat()) { 4347 ciInlineKlass* vk = ary_ptr->elem()->inline_klass(); 4348 default_value = InlineTypeNode::default_oop(gvn(), vk); 4349 if (UseCompressedOops) { 4350 // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops 4351 default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop())); 4352 Node* lower = _gvn.transform(new CastP2XNode(control(), default_value)); 4353 Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32))); 4354 raw_default_value = _gvn.transform(new OrLNode(lower, upper)); 4355 } else { 4356 raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value)); 4357 } 4358 } 4359 } 4360 4361 Node* valid_length_test = _gvn.intcon(1); 4362 if (ary_type->isa_aryptr()) { 4363 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type(); 4364 jint max = TypeAryPtr::max_array_length(bt); 4365 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max))); 4366 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le)); 4367 } 4368 4369 // Create the AllocateArrayNode and its result projections 4370 AllocateArrayNode* alloc 4371 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), 4372 control(), mem, i_o(), 4373 size, klass_node, 4374 initial_slow_test, 4375 length, valid_length_test, 4376 default_value, raw_default_value); 4377 // Cast to correct type. Note that the klass_node may be constant or not, 4378 // and in the latter case the actual array type will be inexact also. 4379 // (This happens via a non-constant argument to inline_native_newArray.) 4380 // In any case, the value of klass_node provides the desired array type. 4381 const TypeInt* length_type = _gvn.find_int_type(length); 4382 if (ary_type->isa_aryptr() && length_type != nullptr) { 4383 // Try to get a better type than POS for the size 4384 ary_type = ary_type->is_aryptr()->cast_to_size(length_type); 4385 } 4386 4387 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception); 4388 4389 array_ideal_length(alloc, ary_type, true); 4390 return javaoop; 4391 } 4392 4393 // The following "Ideal_foo" functions are placed here because they recognize 4394 // the graph shapes created by the functions immediately above. 4395 4396 //---------------------------Ideal_allocation---------------------------------- 4397 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode. 4398 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr) { 4399 if (ptr == nullptr) { // reduce dumb test in callers 4400 return nullptr; 4401 } 4402 4403 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 4404 ptr = bs->step_over_gc_barrier(ptr); 4405 4406 if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast 4407 ptr = ptr->in(1); 4408 if (ptr == nullptr) return nullptr; 4409 } 4410 // Return null for allocations with several casts: 4411 // j.l.reflect.Array.newInstance(jobject, jint) 4412 // Object.clone() 4413 // to keep more precise type from last cast. 4414 if (ptr->is_Proj()) { 4415 Node* allo = ptr->in(0); 4416 if (allo != nullptr && allo->is_Allocate()) { 4417 return allo->as_Allocate(); 4418 } 4419 } 4420 // Report failure to match. 
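  // (For instance, a Phi merging two allocations, or an oop that was cast more
  // than once by Object.clone or reflective Array.newInstance, is not traced
  // back to a single AllocateNode and falls through to the failure case.)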
4421 return nullptr; 4422 } 4423 4424 // Fancy version which also strips off an offset (and reports it to caller). 4425 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseValues* phase, 4426 intptr_t& offset) { 4427 Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset); 4428 if (base == nullptr) return nullptr; 4429 return Ideal_allocation(base); 4430 } 4431 4432 // Trace Initialize <- Proj[Parm] <- Allocate 4433 AllocateNode* InitializeNode::allocation() { 4434 Node* rawoop = in(InitializeNode::RawAddress); 4435 if (rawoop->is_Proj()) { 4436 Node* alloc = rawoop->in(0); 4437 if (alloc->is_Allocate()) { 4438 return alloc->as_Allocate(); 4439 } 4440 } 4441 return nullptr; 4442 } 4443 4444 // Trace Allocate -> Proj[Parm] -> Initialize 4445 InitializeNode* AllocateNode::initialization() { 4446 ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress); 4447 if (rawoop == nullptr) return nullptr; 4448 for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) { 4449 Node* init = rawoop->fast_out(i); 4450 if (init->is_Initialize()) { 4451 assert(init->as_Initialize()->allocation() == this, "2-way link"); 4452 return init->as_Initialize(); 4453 } 4454 } 4455 return nullptr; 4456 } 4457 4458 // Add a Parse Predicate with an uncommon trap on the failing/false path. Normal control will continue on the true path. 4459 void GraphKit::add_parse_predicate(Deoptimization::DeoptReason reason, const int nargs) { 4460 // Too many traps seen? 4461 if (too_many_traps(reason)) { 4462 #ifdef ASSERT 4463 if (TraceLoopPredicate) { 4464 int tc = C->trap_count(reason); 4465 tty->print("too many traps=%s tcount=%d in ", 4466 Deoptimization::trap_reason_name(reason), tc); 4467 method()->print(); // which method has too many predicate traps 4468 tty->cr(); 4469 } 4470 #endif 4471 // We cannot afford to take more traps here, 4472 // do not generate Parse Predicate. 4473 return; 4474 } 4475 4476 ParsePredicateNode* parse_predicate = new ParsePredicateNode(control(), reason, &_gvn); 4477 _gvn.set_type(parse_predicate, parse_predicate->Value(&_gvn)); 4478 Node* if_false = _gvn.transform(new IfFalseNode(parse_predicate)); 4479 { 4480 PreserveJVMState pjvms(this); 4481 set_control(if_false); 4482 inc_sp(nargs); 4483 uncommon_trap(reason, Deoptimization::Action_maybe_recompile); 4484 } 4485 Node* if_true = _gvn.transform(new IfTrueNode(parse_predicate)); 4486 set_control(if_true); 4487 } 4488 4489 // Add Parse Predicates which serve as placeholders to create new Runtime Predicates above them. All 4490 // Runtime Predicates inside a Runtime Predicate block share the same uncommon trap as the Parse Predicate. 4491 void GraphKit::add_parse_predicates(int nargs) { 4492 if (UseLoopPredicate) { 4493 add_parse_predicate(Deoptimization::Reason_predicate, nargs); 4494 } 4495 if (UseProfiledLoopPredicate) { 4496 add_parse_predicate(Deoptimization::Reason_profile_predicate, nargs); 4497 } 4498 // Loop Limit Check Predicate should be near the loop. 4499 add_parse_predicate(Deoptimization::Reason_loop_limit_check, nargs); 4500 } 4501 4502 void GraphKit::sync_kit(IdealKit& ideal) { 4503 set_all_memory(ideal.merged_memory()); 4504 set_i_o(ideal.i_o()); 4505 set_control(ideal.ctrl()); 4506 } 4507 4508 void GraphKit::final_sync(IdealKit& ideal) { 4509 // Final sync IdealKit and graphKit. 
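  // (Typical usage around a kit, as a sketch only:
  //   IdealKit ideal(this);   // capture the kit's control/i_o/memory
  //   ...                     // build an ideal subgraph, e.g. ideal.if_then()
  //   final_sync(ideal);      // push ideal's state back into this GraphKit
  // )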
4510 sync_kit(ideal); 4511 } 4512 4513 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) { 4514 Node* len = load_array_length(load_String_value(str, set_ctrl)); 4515 Node* coder = load_String_coder(str, set_ctrl); 4516 // Divide length by 2 if coder is UTF16 4517 return _gvn.transform(new RShiftINode(len, coder)); 4518 } 4519 4520 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) { 4521 int value_offset = java_lang_String::value_offset(); 4522 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), 4523 false, nullptr, Type::Offset(0)); 4524 const TypePtr* value_field_type = string_type->add_offset(value_offset); 4525 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, 4526 TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true), 4527 ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0)); 4528 Node* p = basic_plus_adr(str, str, value_offset); 4529 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, 4530 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); 4531 return load; 4532 } 4533 4534 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) { 4535 if (!CompactStrings) { 4536 return intcon(java_lang_String::CODER_UTF16); 4537 } 4538 int coder_offset = java_lang_String::coder_offset(); 4539 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), 4540 false, nullptr, Type::Offset(0)); 4541 const TypePtr* coder_field_type = string_type->add_offset(coder_offset); 4542 4543 Node* p = basic_plus_adr(str, str, coder_offset); 4544 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE, 4545 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); 4546 return load; 4547 } 4548 4549 void GraphKit::store_String_value(Node* str, Node* value) { 4550 int value_offset = java_lang_String::value_offset(); 4551 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), 4552 false, nullptr, Type::Offset(0)); 4553 const TypePtr* value_field_type = string_type->add_offset(value_offset); 4554 4555 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type, 4556 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED); 4557 } 4558 4559 void GraphKit::store_String_coder(Node* str, Node* value) { 4560 int coder_offset = java_lang_String::coder_offset(); 4561 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), 4562 false, nullptr, Type::Offset(0)); 4563 const TypePtr* coder_field_type = string_type->add_offset(coder_offset); 4564 4565 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type, 4566 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED); 4567 } 4568 4569 // Capture src and dst memory state with a MergeMemNode 4570 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) { 4571 if (src_type == dst_type) { 4572 // Types are equal, we don't need a MergeMemNode 4573 return memory(src_type); 4574 } 4575 MergeMemNode* merge = MergeMemNode::make(map()->memory()); 4576 record_for_igvn(merge); // fold it up later, if possible 4577 int src_idx = C->get_alias_index(src_type); 4578 int dst_idx = C->get_alias_index(dst_type); 4579 merge->set_memory_at(src_idx, memory(src_idx)); 4580 merge->set_memory_at(dst_idx, memory(dst_idx)); 4581 return merge; 4582 } 4583 4584 Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) { 4585 
assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported"); 4586 assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type"); 4587 // If input and output memory types differ, capture both states to preserve 4588 // the dependency between preceding and subsequent loads/stores. 4589 // For example, the following program: 4590 // StoreB 4591 // compress_string 4592 // LoadB 4593 // has this memory graph (use->def): 4594 // LoadB -> compress_string -> CharMem 4595 // ... -> StoreB -> ByteMem 4596 // The intrinsic hides the dependency between LoadB and StoreB, causing 4597 // the load to read from memory not containing the result of the StoreB. 4598 // The correct memory graph should look like this: 4599 // LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem)) 4600 Node* mem = capture_memory(src_type, TypeAryPtr::BYTES); 4601 StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count); 4602 Node* res_mem = _gvn.transform(new SCMemProjNode(_gvn.transform(str))); 4603 set_memory(res_mem, TypeAryPtr::BYTES); 4604 return str; 4605 } 4606 4607 void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) { 4608 assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported"); 4609 assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type"); 4610 // Capture src and dst memory (see comment in 'compress_string'). 4611 Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type); 4612 StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count); 4613 set_memory(_gvn.transform(str), dst_type); 4614 } 4615 4616 void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) { 4617 /** 4618 * int i_char = start; 4619 * for (int i_byte = 0; i_byte < count; i_byte++) { 4620 * dst[i_char++] = (char)(src[i_byte] & 0xff); 4621 * } 4622 */ 4623 add_parse_predicates(); 4624 C->set_has_loops(true); 4625 4626 RegionNode* head = new RegionNode(3); 4627 head->init_req(1, control()); 4628 gvn().set_type(head, Type::CONTROL); 4629 record_for_igvn(head); 4630 4631 Node* i_byte = new PhiNode(head, TypeInt::INT); 4632 i_byte->init_req(1, intcon(0)); 4633 gvn().set_type(i_byte, TypeInt::INT); 4634 record_for_igvn(i_byte); 4635 4636 Node* i_char = new PhiNode(head, TypeInt::INT); 4637 i_char->init_req(1, start); 4638 gvn().set_type(i_char, TypeInt::INT); 4639 record_for_igvn(i_char); 4640 4641 Node* mem = PhiNode::make(head, memory(TypeAryPtr::BYTES), Type::MEMORY, TypeAryPtr::BYTES); 4642 gvn().set_type(mem, Type::MEMORY); 4643 record_for_igvn(mem); 4644 set_control(head); 4645 set_memory(mem, TypeAryPtr::BYTES); 4646 Node* ch = load_array_element(src, i_byte, TypeAryPtr::BYTES, /* set_ctrl */ true); 4647 Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE), 4648 AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered, 4649 false, false, true /* mismatched */); 4650 4651 IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN); 4652 head->init_req(2, IfTrue(iff)); 4653 mem->init_req(2, st); 4654 i_byte->init_req(2, AddI(i_byte, intcon(1))); 4655 i_char->init_req(2, AddI(i_char, intcon(2))); 4656 4657 set_control(IfFalse(iff)); 4658 set_memory(st, TypeAryPtr::BYTES); 4659 } 4660 4661 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) { 4662 if (!field->is_constant()) { 4663 return nullptr; // Field not 
marked as constant. 4664 } 4665 ciInstance* holder = nullptr; 4666 if (!field->is_static()) { 4667 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop(); 4668 if (const_oop != nullptr && const_oop->is_instance()) { 4669 holder = const_oop->as_instance(); 4670 } 4671 } 4672 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(), 4673 /*is_unsigned_load=*/false); 4674 if (con_type != nullptr) { 4675 Node* con = makecon(con_type); 4676 if (field->type()->is_inlinetype()) { 4677 con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free()); 4678 } else if (con_type->is_inlinetypeptr()) { 4679 con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free()); 4680 } 4681 return con; 4682 } 4683 return nullptr; 4684 } 4685 4686 //---------------------------load_mirror_from_klass---------------------------- 4687 // Given a klass oop, load its java mirror (a java.lang.Class oop). 4688 Node* GraphKit::load_mirror_from_klass(Node* klass) { 4689 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset())); 4690 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); 4691 // mirror = ((OopHandle)mirror)->resolve(); 4692 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); 4693 }
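// (Note on the two loads above: Klass::_java_mirror is an OopHandle, so the
// mirror is reached indirectly: first a raw T_ADDRESS load of the handle,
// then an IN_NATIVE oop load that performs the OopHandle::resolve() step.)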