/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;        // Compilation environment
  PhaseGVN         &_gvn;        // Some optimizations while parsing
  SafePointNode*    _map;        // Parser map from JVM to Nodes
  SafePointNode*    _exceptions; // Parser map(s) for exception state(s)
  int               _bci;        // JVM Bytecode Pointer
  ciMethod*         _method;     // JVM Current Method
  BarrierSetC2*     _barrier_set;

 private:
  int               _sp;         // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != nullptr, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return nullptr; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }

  ciEnv*        env()               const { return _env; }
  PhaseGVN&     gvn()               const { return _gvn; }
  void*         barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }

  // Handy well-known nodes:
  Node*         null()  const { return zerocon(T_OBJECT); }
  Node*         top()   const { return C->top(); }
  RootNode*     root()  const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)   const { return _gvn.intcon(con); }
  Node* longcon(jlong con) const { return _gvn.longcon(con); }
  Node* integercon(jlong con, BasicType bt) const {
    if (bt == T_INT) {
      return intcon(checked_cast<jint>(con));
    }
    assert(bt == T_LONG, "basic type not an int or long");
    return longcon(con);
  }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*     map()            const { return _map; }
  bool               has_exceptions() const { return _exceptions != nullptr; }
  JVMState*          jvms()           const { return map_not_null()->_jvms; }
  int                sp()             const { return _sp; }
  int                bci()            const { return _bci; }
  Bytecodes::Code    java_bc()        const;
  ciMethod*          method()         const { return _method; }

  void set_jvms(JVMState* jvms)  { set_map(jvms->map());
                                   assert(jvms == this->jvms(), "sanity");
                                   _sp = jvms->sp();
                                   _bci = jvms->bci();
                                   _method = jvms->has_method() ? jvms->method() : nullptr; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)            { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)             { set_sp(sp() + i); }
  void dec_sp(int i)             { set_sp(sp() - i); }
  void set_bci(int bci)          { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Reverses the work done by clone_map(). Should only be used when the node returned by
  // clone_map() is ultimately not used. Calling Node::destruct directly in the previously
  // mentioned circumstance instead of this method may result in use-after-free.
  void destruct_map_clone(SafePointNode* sfp);

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to null, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(nullptr); return m; }

  // Stop, but first smash the map's inputs to null, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is null, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Recover a saved exception from its map, and remove it from the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != nullptr) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(nullptr);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == nullptr)  return nullptr;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != nullptr) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, returns the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // The JVMS must allow the bytecode to be re-executed via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);


  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new AddPNode(b, a, o));    }

  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Cast array allocation's length as narrow as possible.
  // If replace_length_in_map is true, replace length with CastIINode in map.
  // This method is invoked after creating/moving ArrayAllocationNode or in load_array_length
  Node* array_ideal_length(AllocateArrayNode* alloc,
                           const TypeOopPtr* oop_type,
                           bool replace_length_in_map);


  // Helper function to do a null pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = nullptr,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
  }

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  void guard_klass_being_initialized(Node* klass);
  void guard_init_thread(Node* klass);

  void clinit_barrier(ciInstanceKlass* ik, ciMethod* context);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return nullptr;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     const TypeKlassPtr* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);
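
  // A minimal illustration of the expression-stack discipline used with the
  // push/pop helpers below when expanding a two-operand bytecode (sketch
  // only, not code taken from this file):
  //
  //   Node* b = pop();       // second operand, pushed last
  //   Node* a = pop();       // first operand
  //   push(AddI(a, b));      // push the transformed result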
  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp   );   }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return nullptr;
  }

  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)          { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)          { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe, barrier_data);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
    // This version computes alias_index from an address type
    assert(adr_type != nullptr, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe, barrier_data);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0) {
    // This version computes alias_index from an address type
    assert(adr_type != nullptr, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched, unsafe,
                           barrier_data);
  }
  // This is the base version which is given alias index
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0);

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src, Node* dst, Node* size, bool is_array);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = nullptr,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = nullptr);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);
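
  // A minimal sketch of a decorated heap store built with access_store_at
  // above.  The decorator combination shown is illustrative only; the full
  // DecoratorSet values live in oops/accessDecorators.hpp:
  //
  //   access_store_at(obj, adr, adr_type, val, val_type, T_OBJECT,
  //                   IN_HEAP | MO_UNORDERED);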
  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
    // Use callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, nullptr, nullptr);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node* counter_addr);     // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  Node* uncommon_trap(int trap_request,
                      ciKlass* klass = nullptr, const char* reason_string = nullptr,
                      bool must_throw = false, bool keep_exact_action = false);
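
  // A typical guarded-trap idiom during parsing (sketch only; the reason and
  // action are example values, not a prescribed pairing):
  //
  //   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
  //     uncommon_trap(Deoptimization::make_trap_request(
  //                     Deoptimization::Reason_intrinsic,
  //                     Deoptimization::Action_make_not_entrant));
  //   }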
  // Shorthand, to avoid saying "Deoptimization::" so many times.
  Node* uncommon_trap(Deoptimization::DeoptReason reason,
                      Deoptimization::DeoptAction action,
                      ciKlass* klass = nullptr, const char* reason_string = nullptr,
                      bool must_throw = false, bool keep_exact_action = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                         klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  Node* uncommon_trap_exact(Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action,
                            ciKlass* klass = nullptr, const char* reason_string = nullptr,
                            bool must_throw = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                         klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  bool too_many_traps_or_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_traps_or_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // null if no memory effects
                          Node* parm0 = nullptr, Node* parm1 = nullptr,
                          Node* parm2 = nullptr, Node* parm3 = nullptr,
                          Node* parm4 = nullptr, Node* parm5 = nullptr,
                          Node* parm6 = nullptr, Node* parm7 = nullptr);

  Node* sign_extend_byte(Node* in);
  Node* sign_extend_short(Node* in);

  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_VECTOR = 64,             // CallLeafVectorNode
    RC_LEAF = 0                 // null value:  no flags set
  };
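
  // A minimal sketch of a leaf runtime call using the flags above.  The
  // TypeFunc, entry point, name, and arguments are placeholders rather than
  // definitions from this file:
  //
  //   Node* call = make_runtime_call(RC_LEAF | RC_NO_IO,
  //                                  call_type,        // const TypeFunc* of the stub
  //                                  call_addr,        // address of the C entry point
  //                                  "example_leaf_call",
  //                                  TypePtr::BOTTOM,  // conservative memory effects
  //                                  arg0, arg1);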
  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = nullptr );

  Node* gen_subtype_check(Node* obj, Node* superklass);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception=false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = nullptr,
                     Node* *return_size_val = nullptr,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = nullptr,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));          // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);      // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff);                            // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);      // Range-check and Null-check removal is later
    return iff;
  }

  void add_parse_predicates(int nargs = 0);
  void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  // Vector API support (implemented in vectorIntrinsics.cpp)
  Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
  Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool shuffle_to_vector = false);
  Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon de-
// struction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp and restore
// them back
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_OPTO_GRAPHKIT_HPP