/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallLeafVectorNode;
class AllocateNode;
class AllocateArrayNode;
class AbstractLockNode;
class LockNode;
class UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------ForwardExceptionNode---------------------------
// Pop stack frame and jump to StubRoutines::forward_exception_entry()
class ForwardExceptionNode : public ReturnNode {
public:
  ForwardExceptionNode(Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr)
    : ReturnNode(TypeFunc::Parms, cntrl, i_o, memory, frameptr, retadr) {
  }

  virtual int Opcode() const;
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*        _caller;    // List pointer for forming scope chains
  uint             _depth;     // One more than caller depth, or one.
  uint             _locoff;    // Offset to locals in input edge mapping
  uint             _stkoff;    // Offset to stack in input edge mapping
  uint             _monoff;    // Offset to monitors in input edge mapping
  uint             _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint             _endoff;    // Offset to end of input edge mapping
  uint             _sp;        // Java Expression Stack Pointer for this state
  int              _bci;       // Byte Code Index of this JVM point
  ReexecuteState   _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*        _method;    // Method Pointer
  SafePointNode*   _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
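  //
  // Illustrative example (for exposition only, not from the original source):
  // for a single un-inlined method with 3 locals, a max stack of 4, sp() == 2,
  // and one monitor, the mapping above gives
  //
  //   locoff()                    -> locals occupy [locoff, locoff+3)
  //   stkoff() == locoff() + 3    -> stack slots occupy [stkoff, stkoff+4)
  //   argoff() == stkoff() + sp() -> outgoing args start at the live stack top
  //   monoff() == stkoff() + 4    -> one (box, obj) monitor pair follows
  //   scloff() == monoff() + 2    -> scalarized-object fields (none here)
  //   endoff() == scloff()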
  uint             locoff() const { return _locoff; }
  uint             stkoff() const { return _stkoff; }
  uint             argoff() const { return _stkoff + _sp; }
  uint             monoff() const { return _monoff; }
  uint             scloff() const { return _scloff; }
  uint             endoff() const { return _endoff; }
  uint             oopoff() const { return debug_end(); }

  int              loc_size() const { return stkoff() - locoff(); }
  int              stk_size() const { return monoff() - stkoff(); }
  int              mon_size() const { return scloff() - monoff(); }
  int              scl_size() const { return endoff() - scloff(); }

  bool             is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool             is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool             is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool             is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint             sp() const { return _sp; }
  int              bci() const { return _bci; }
  bool             should_reexecute() const { return _reexecute == Reexecute_True; }
  bool             is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool             has_method() const { return _method != nullptr; }
  ciMethod*        method() const { assert(has_method(), ""); return _method; }
  JVMState*        caller() const { return _caller; }
  SafePointNode*   map() const { return _map; }
  uint             depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint             debug_end() const;   // returns endoff of self
  uint             debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;
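  // For instance (illustrative only): if a() inlines b(), which inlines c(),
  // the JVMState at a safepoint in c() has depth() == 3; of_depth(1) is the
  // root state of a(), of_depth(3) is this state, and caller() links each
  // state to the state of its inlining caller.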
  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)   const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)   const { return (is_mon(off)
                                                  && is_monitor_box(off))
                                                 || (caller() && caller()->is_monitor_use(off)); }
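  // Since logMonitorEdges == 1, each monitor idx occupies two adjacent input
  // edges (an illustrative restatement of the offset functions above):
  //   monitor_box_offset(idx) == monoff() + 2*idx      // the BoxLockNode
  //   monitor_obj_offset(idx) == monoff() + 2*idx + 1  // the locked object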
  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode* map) { _map = map; }
  void bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) {
    if (_bci != bci) {
      _reexecute = Reexecute_Undefined;
    }
    _bci = bci;
  }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;

  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

protected:
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type; // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "cannot assign a null value to _jvms");
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (null):
                const TypePtr* adr_type = nullptr)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != nullptr) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }
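  // Illustrative (hypothetical) reads through the accessors above; the slot
  // numbers are made-up examples:
  //   JVMState* jvms = sfpt->jvms();
  //   Node* receiver = sfpt->local(jvms, 0);               // local slot 0
  //   Node* tos      = sfpt->stack(jvms, jvms->sp() - 1);  // top of stack
  //   Node* box      = sfpt->monitor_box(jvms, 0);         // first monitor's box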
  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
  // Peek Operand Stacks, JVMS 2.6.2
  Node* peek_operand(uint off = 0) const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == nullptr; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != nullptr; }
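  // Illustrative (hypothetical) drain loop over the recorded exception
  // states; the real handling lives in the parser/GraphKit:
  //   for (SafePointNode* ex = map->next_exception(); ex != nullptr; ) {
  //     SafePointNode* next = ex->next_exception();
  //     ex->set_next_exception(nullptr);  // detach before processing
  //     // ... merge 'ex' into the enclosing exception state ...
  //     ex = next;
  //   }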
  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int             Opcode() const;
  virtual bool            pinned() const { return true; }
  virtual const Type*     Value(PhaseGVN* phase) const;
  virtual const Type*     bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr*  adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node*           Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*           Identity(PhaseGVN* phase);
  virtual uint            ideal_reg() const { return 0; }
  virtual const RegMask  &in_RegMask(uint) const;
  virtual const RegMask  &out_RegMask() const;
  virtual uint            match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void            dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _depth;       // Depth of the JVM state the _first_index field refers to
  uint _n_fields;    // Number of non-static fields of the scalarized object.

  Node* _alloc;      // Just for debugging purposes.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields);

  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != nullptr, "missed JVMS");
    return jvms->of_depth(_depth)->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarMergeNode----------------------
//
// This class represents an allocation merge that is used as debug information
// and has at least one of its inputs scalar replaced.
//
// The required inputs of this node, except the control, are pointers to
// SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge. The other properties of the class are described below.
//
// _merge_pointer_idx : index in the SafePointNode's input array where the
//  description of the _allocation merge_ starts. The index is zero based and
//  relative to the SafePoint's scloff. The two entries in the SafePointNode's
//  input array starting at '_merge_pointer_idx' are Phi nodes representing:
//
//  1) The original merge Phi. During rematerialization this input will only be
//  used if the "selector Phi" (see below) indicates that the execution of the
//  Phi took the path of a non-scalarized input.
//
//  2) A "selector Phi". The output of this Phi will be '-1' if the execution
//  of the method exercised a non-scalarized input of the original Phi.
//  Otherwise, the output will be >= 0, and it is one less than the index in
//  the SafePointScalarMergeNode input array where the description of the
//  scalarized object to be used is found.
//
// As an example, consider a Phi merging 3 inputs, of which the last 2 are
// scalar replaceable.
//
//  Phi(Region, NSR, SR, SR)
//
// During scalar replacement the SR inputs will be changed to null:
//
//  Phi(Region, NSR, nullptr, nullptr)
//
// A corresponding selector Phi will be created with a configuration like this:
//
//  Phi(Region, -1, 0, 1)
//
// During execution of the compiled method, if the execution reaches a Trap, the
// output of the selector Phi will tell if we need to rematerialize one of the
// scalar replaced inputs or if we should just use the pointer returned by the
// original Phi.

class SafePointScalarMergeNode: public TypeNode {
  int _merge_pointer_idx; // This is the first input edge relative
                          // index of a SafePoint node where metadata information relative
                          // to restoring the merge is stored. The corresponding input
                          // in the associated SafePoint will point to a Phi representing
                          // potential non-scalar replaced objects.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

public:
  SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);

  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  virtual uint size_of() const { return sizeof(*this); }

  int merge_pointer_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx;
  }

  int selector_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarMergeNodes to new, to avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
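
// Illustrative (hypothetical) sketch of how a debug-info consumer could use
// the two indices above; 'smerge' and 'jvms' are assumed names:
//
//   int ptr_idx = smerge->merge_pointer_idx(jvms); // input: original merge Phi
//   int sel_idx = smerge->selector_idx(jvms);      // input: selector Phi
//   // selector value -1     -> use the pointer produced by the merge Phi
//   // selector value k >= 0 -> rematerialize from the SafePointScalarMergeNode
//   //                          input at index k+1 (a SafePointScalarObjectNode)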
// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint  nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = nullptr;
    fallthrough_catchproj = nullptr;
    fallthrough_memproj   = nullptr;
    fallthrough_ioproj    = nullptr;
    catchall_catchproj    = nullptr;
    catchall_memproj      = nullptr;
    catchall_ioproj       = nullptr;
    exobj                 = nullptr;
    nb_resproj            = nbres;
    resproj[0]            = nullptr;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = nullptr;
    }
  }

};
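// Illustrative (hypothetical) use during call surgery; extract_projections()
// is declared on CallNode below and fills in the fields above:
//
//   CallProjections* projs = call->extract_projections(false /*separate_io_proj*/);
//   if (projs->fallthrough_catchproj != nullptr) {
//     // rewire users of the normal control path here
//   }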
class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()        const { return _tf; }
  address entry_point()       const { return _entry_point; }
  float   cnt()               const { return _cnt; }
  CallGenerator* generator()  const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  bool has_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range_sig();
    return (!tf()->returns_inline_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  ciMethod* _method;                 // Method being direct called
  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  bool      _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);

public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (InlineTypeReturnedAsFields &&
        method != nullptr &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
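// Illustrative (hypothetical) check for an uncommon-trap call site, using the
// accessors declared above (Deoptimization::trap_request_reason is assumed
// from deoptimization.hpp):
//
//   if (n->is_CallStaticJava() && n->as_CallStaticJava()->is_uncommon_trap()) {
//     int req = n->as_CallStaticJava()->uncommon_trap_request();
//     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
//   }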
//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf, addr, method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int  Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ValidLengthTest,
    InlineType,                   // InlineTypeNode if this is an inline type allocation
    DefaultValue,                 // default value in case of non-flat inline type array
    RawDefaultValue,              // same as above but as raw machine word
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]       = TypeInt::POS;
    fields[KlassNode]       = TypeInstPtr::NOTNULL;
    fields[InitialTest]     = TypeInt::BOOL;
    fields[ALength]         = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;
    fields[InlineType]      = Type::BOTTOM;
    fields[DefaultValue]    = TypeInstPtr::NOTNULL;
    fields[RawDefaultValue] = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;
  bool _larval;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               InlineTypeNode* inline_type_node = nullptr);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr) ? nullptr : allo->in(KlassNode);
  }
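  // Illustrative (hypothetical) pattern match over a pointer 'ptr':
  //
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
  //   if (alloc != nullptr && alloc->is_AllocateArray()) {
  //     Node* len = alloc->in(AllocateNode::ALength);  // array length input
  //   }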
  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state needs to be NoEscape or ArgEscape.  InitializeNode._does_not_escape
  // is true when the allocation's escape state is NoEscape or ArgEscape.
  // In case the allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag instead, which is true when the
  // escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = nullptr;
    return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of the <init>, the memory barrier for the
  // new is not necessary.  Invoke this method when the MemBar at the exit of
  // the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test,
                    Node* default_value, Node* raw_default_value)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,          count_val);
    set_req(AllocateNode::ValidLengthTest,  valid_length_test);
    init_req(AllocateNode::DefaultValue,    default_value);
    init_req(AllocateNode::RawDefaultValue, raw_default_value);
  }
  virtual uint size_of() const { return sizeof(*this); }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr || !allo->is_AllocateArray())
           ? nullptr : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for non escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = nullptr;
#endif
  }
  virtual int Opcode() const = 0;
  Node* obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node* box_node() const      { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char* kind_as_string() const;
  void log_lock_optimization(Compile* c, const char* tag, Node* bad_lock = nullptr) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // Check that all locks/unlocks associated with object come from balanced regions.
  // They can become unbalanced after coarsening optimization or on OSR entry.
  bool is_balanced();

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
  static const TypeFunc* _lock_type_Type;
public:

  static inline const TypeFunc* lock_type() {
    assert(_lock_type_Type != nullptr, "should be initialized");
    return _lock_type_Type;
  }

  static void initialize_lock_Type() {
    assert(_lock_type_Type == nullptr, "should be called once");
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    _lock_type_Type = TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(nullptr)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return nullptr; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP