/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/partialEscape.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallLeafVectorNode;
class AllocateNode;
class AllocateArrayNode;
class AbstractLockNode;
class LockNode;
class UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};

//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
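
// For illustration (a hedged sketch, not part of this interface): incoming
// arguments are ParmNode projections of the StartNode, selected by their
// input-edge index, e.g.
//
//   Node* parm0 = gvn.transform(new ParmNode(start, TypeFunc::Parms + 0));
//
// where 'gvn' and 'start' are hypothetical locals of the caller.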

//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};

//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
  PEAState          _alloc_state;

public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method
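
  // For illustration (a hedged sketch): because of the placement operator new
  // above, JVMState objects are allocated in the Compile's comp_arena, e.g.
  //
  //   JVMState* jvms = new (C) JVMState(callee, caller_jvms);  // hypothetical names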

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint      locoff() const { return _locoff; }
  uint      stkoff() const { return _stkoff; }
  uint      argoff() const { return _stkoff + _sp; }
  uint      monoff() const { return _monoff; }
  uint      scloff() const { return _scloff; }
  uint      endoff() const { return _endoff; }
  uint      oopoff() const { return debug_end(); }

  int       loc_size() const { return stkoff() - locoff(); }
  int       stk_size() const { return monoff() - stkoff(); }
  int       mon_size() const { return scloff() - monoff(); }
  int       scl_size() const { return endoff() - scloff(); }

  bool      is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool      is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool      is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool      is_scl(uint i) const { return scloff() <= i && i < endoff(); }
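
  // Worked example (hypothetical numbers, for illustration only): a scope with
  // 3 locals, a 4-slot expression stack area, sp() == 2, one monitor and no
  // scalarized fields, whose edge mapping starts at offset 5, answers
  //   locoff() == 5, stkoff() == 8, argoff() == 8 + 2 == 10,
  //   monoff() == 12, scloff() == 14, endoff() == 14,
  // so loc_size() == 3, stk_size() == 4, mon_size() == 2 and scl_size() == 0.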

  uint      sp()  const { return _sp; }
  int       bci() const { return _bci; }
  bool      should_reexecute() const { return _reexecute == Reexecute_True; }
  bool      is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool      has_method() const { return _method != nullptr; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint      depth() const { return _depth; }
  uint      debug_start() const; // returns locoff of root caller
  uint      debug_end()   const; // returns endoff of self
  uint      debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint      debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode* map) { _map = map; }
  void bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;
  PEAState& alloc_state() { return _alloc_state; }

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
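
// For illustration (a hedged sketch): the scope chain can be walked from the
// youngest JVMState to the root, e.g.
//
//   for (JVMState* j = youngest_jvms; j != nullptr; j = j->caller()) {
//     if (j->has_method()) { /* inspect j->method() and j->bci() */ }
//   }
//
// where 'youngest_jvms' is a hypothetical name.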

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class VMStructs;

  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

protected:
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type; // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "assign null value to _jvms");
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (null):
                const TypePtr* adr_type = nullptr)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != nullptr) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }
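
  // For illustration (a hedged sketch): debug-info edges are always read
  // through a JVMState that matches this map, e.g. local 0 of the youngest
  // scope of a hypothetical map 'sfpt':
  //
  //   Node* l0 = sfpt->local(sfpt->jvms(), 0);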

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
  // Peek Operand Stacks, JVMS 2.6.2
  Node* peek_operand(uint off = 0) const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == nullptr; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != nullptr; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type*    bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node*          Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
  uint  _first_index; // First input edge relative index of a SafePoint node where
                      // states of the scalarized object fields are collected.
                      // It is relative to the last (youngest) jvms->_scloff.
  uint  _n_fields;    // Number of non-static fields of the scalarized object.

  Node* _alloc;       // Just for debugging purposes.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint n_fields);

  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != nullptr, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
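
// For illustration (a hedged sketch): the state of field k of a scalarized
// object described by a hypothetical 'scalar' node at safepoint 'sfpt' is the
// safepoint input edge
//
//   Node* field_k = sfpt->in(scalar->first_index(sfpt->jvms()) + k);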

//------------------------------SafePointScalarMergeNode----------------------
//
// This class represents an allocation merge that is used as debug information
// and has at least one of its inputs scalar replaced.
//
// The required inputs of this node, except the control, are pointers to
// SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge. The other properties of the class are described below.
//
// _merge_pointer_idx : index in the SafePointNode's input array where the
//   description of the _allocation merge_ starts. The index is zero based and
//   relative to the SafePoint's scloff. The two entries in the SafePointNode's
//   input array starting at '_merge_pointer_idx' are Phi nodes representing:
//
//   1) The original merge Phi. During rematerialization this input will only be
//   used if the "selector Phi" (see below) indicates that the execution of the
//   Phi took the path of a non scalarized input.
//
//   2) A "selector Phi". The output of this Phi will be '-1' if the execution
//   of the method exercised a non scalarized input of the original Phi.
//   Otherwise, the output will be >= 0, and it will indicate the index (minus 1)
//   in the SafePointScalarMergeNode input array where the description of the
//   scalarized object that should be used is found.
//
// As an example, consider a Phi merging 3 inputs, of which the last 2 are
// scalar replaceable.
//
//  Phi(Region, NSR, SR, SR)
//
// During scalar replacement the SR inputs will be changed to null:
//
//  Phi(Region, NSR, nullptr, nullptr)
//
// A corresponding selector Phi will be created with a configuration like this:
//
//  Phi(Region, -1, 0, 1)
//
// During execution of the compiled method, if the execution reaches a Trap, the
// output of the selector Phi will tell if we need to rematerialize one of the
// scalar replaced inputs or if we should just use the pointer returned by the
// original Phi.

class SafePointScalarMergeNode: public TypeNode {
  int _merge_pointer_idx; // This is the first input edge relative
                          // index of a SafePoint node where metadata information relative
                          // to restoring the merge is stored. The corresponding input
                          // in the associated SafePoint will point to a Phi representing
                          // potential non-scalar replaced objects.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

public:
  SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);

  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  virtual uint size_of() const { return sizeof(*this); }

  int merge_pointer_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx;
  }

  int selector_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  address         entry_point() const { return _entry_point; }
  float           cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool  cmp(const Node &n) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m);
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
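
  // For illustration (a hedged sketch of typical use during macro expansion):
  //
  //   CallProjections projs;
  //   call->extract_projections(&projs, false /*separate_io_proj*/);
  //   // ... rewire projs.fallthrough_catchproj, projs.resproj, etc. ...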

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;                 // Method being direct called
  bool      _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
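
// For illustration (a hedged sketch): uncommon traps are emitted as
// CallStaticJavaNodes whose trap request encodes the deoptimization reason
// and action, e.g. for a hypothetical CallStaticJavaNode* 'call':
//
//   if (call->is_uncommon_trap()) {
//     int trap_request = call->uncommon_trap_request();
//     // decode reason/action from trap_request ...
//   }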

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf, addr, method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
private:
  int _materialized; // materialization counts by PEA

public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ValidLengthTest,
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr) ? nullptr : allo->in(KlassNode);
  }
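
  // For illustration (a hedged sketch): pattern-matching an allocation from a
  // pointer it produced, for a hypothetical 'ptr':
  //
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
  //   Node* klass = (alloc == nullptr) ? nullptr : alloc->in(KlassNode);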

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state needs to be NoEscape or ArgEscape. InitializeNode._does_not_escape
  // is true when its allocation's escape state is NoEscape or ArgEscape. In
  // case the allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag instead; it is true when the escape
  // state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = nullptr;
    return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
  }

  // If the object doesn't escape in its <.init> method and there is a memory
  // barrier inserted at the exit of its <.init>, the memory barrier for the
  // new is not necessary. Invoke this method when the MemBar is at the exit
  // of the initializer and post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() const { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);

  const TypeOopPtr* oop_type(const PhaseValues& phase) const;

  void inc_materialized() {
    _materialized++;
  }

  int materialized_cnt() const {
    return _materialized;
  }
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,         count_val);
    set_req(AllocateNode::ValidLengthTest, valid_length_test);
  }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr || !allo->is_AllocateArray())
           ? nullptr : allo->as_AllocateArray();
  }
};
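
// For illustration (a hedged sketch): recovering the length of an array
// allocation from a pointer it produced, for a hypothetical 'ptr':
//
//   AllocateArrayNode* aa = AllocateArrayNode::Ideal_array_allocation(ptr);
//   Node* length = (aa == nullptr) ? nullptr : aa->Ideal_length();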

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = nullptr;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};
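
// For illustration (a hedged sketch): the accessors above map onto the lock's
// input edges, for a hypothetical AbstractLockNode* 'alock':
//
//   alock->obj_node()      == alock->in(TypeFunc::Parms + 0)  // object to lock
//   alock->box_node()      == alock->in(TypeFunc::Parms + 1)  // BoxLockNode
//   alock->fastlock_node() == alock->in(TypeFunc::Parms + 2)  // FastLockNode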

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(nullptr)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return nullptr; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP