/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallLeafVectorNode;
class CallNativeNode;
class AllocateNode;
class AllocateArrayNode;
class AbstractLockNode;
class LockNode;
class UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
};

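// Example (illustrative sketch, not part of this interface): the Start
// node's _domain is a TypeTuple describing the incoming parameters. A
// domain for a method taking a single int might be assembled like this,
// using the same TypeTuple/TypeFunc idiom as alloc_type() and lock_type()
// below:
//
//   const Type** fields = TypeTuple::fields(1);
//   fields[TypeFunc::Parms+0] = TypeInt::INT;   // the int parameter
//   const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);
//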
//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};
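// Example (illustrative sketch): ReturnNode, RethrowNode and the call nodes
// below all share the fixed TypeFunc edge layout, so their standard inputs
// can be read positionally. For such a node n:
//
//   Node* ctrl = n->in(TypeFunc::Control);
//   Node* io   = n->in(TypeFunc::I_O);
//   Node* mem  = n->in(TypeFunc::Memory);
//   // actual arguments/results start at TypeFunc::Parms
//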
//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*      _caller;    // List pointer for forming scope chains
  uint           _depth;     // One more than caller depth, or one.
  uint           _locoff;    // Offset to locals in input edge mapping
  uint           _stkoff;    // Offset to stack in input edge mapping
  uint           _monoff;    // Offset to monitors in input edge mapping
  uint           _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint           _endoff;    // Offset to end of input edge mapping
  uint           _sp;        // Java Expression Stack Pointer for this state
  int            _bci;       // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*      _method;    // Method Pointer
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return stkoff() - locoff(); }
  int stk_size() const { return monoff() - stkoff(); }
  int mon_size() const { return scloff() - monoff(); }
  int scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint sp()  const { return _sp; }
  int  bci() const { return _bci; }
  bool should_reexecute() const { return _reexecute == Reexecute_True; }
  bool is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool has_method() const { return _method != NULL; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint depth() const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end()   const; // returns endoff of self
  uint debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode* map) { _map = map; }
  void bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;     // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const;  // retains uncloned caller
  void      set_map_deep(SafePointNode *map); // reset map for all callers
  void      adapt_position(int delta);        // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void print_method_with_lineno(outputStream* st, bool show_name) const;
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};
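// Example (illustrative sketch): walking the inlining chain recorded by a
// JVMState and locating a monitor's edges. Assumes a well-formed state.
//
//   for (JVMState* jvms = youngest; jvms != NULL; jvms = jvms->caller()) {
//     if (jvms->has_method()) {
//       tty->print_cr("depth %u, bci %d", jvms->depth(), jvms->bci());
//     }
//   }
//   // Monitor i occupies a (box, obj) pair of input edges of the map:
//   //   box at jvms->monitor_box_offset(i), obj at jvms->monitor_obj_offset(i)
//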
//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class VMStructs;

  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

protected:
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type; // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "assign NULL value to _jvms");
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type*    bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node*          Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask& in_RegMask(uint) const;
  virtual const RegMask& out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
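// Example (illustrative sketch): dumping the locals recorded at a safepoint
// map. Assumes map->jvms() is non-null and verify_jvms would pass.
//
//   JVMState* jvms = map->jvms();
//   for (uint i = 0; i < (uint)jvms->loc_size(); i++) {
//     Node* l = map->local(jvms, i);
//     if (!l->is_top()) {
//       l->dump();
//     }
//   }
//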
//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  bool _is_auto_box; // True if the scalarized object is an auto box.
  DEBUG_ONLY(Node* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            Node* alloc,
#endif
                            uint first_index, uint n_fields, bool is_auto_box = false);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

  bool is_auto_box() const { return _is_auto_box; }
#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint  nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = NULL;
    fallthrough_catchproj = NULL;
    fallthrough_memproj   = NULL;
    fallthrough_ioproj    = NULL;
    catchall_catchproj    = NULL;
    catchall_memproj      = NULL;
    catchall_ioproj       = NULL;
    exobj                 = NULL;
    nb_resproj            = nbres;
    resproj[0]            = NULL;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = NULL;
    }
  }

};

class CallGenerator;
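// Example (illustrative sketch): typical use of CallProjections when
// rewiring a call during macro expansion or late inlining. Assumes a
// CallNode* call whose projections are well-formed.
//
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
//   for (uint i = 0; i < projs->nb_resproj; i++) {
//     if (projs->resproj[i] != NULL) {
//       // hook each result projection up to the replacement subgraph
//     }
//   }
//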
//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool  cmp(const Node &n) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  bool has_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range_sig();
    return (!tf()->returns_inline_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;                 // Method being direct called
  bool      _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                 { return _method; }
  void set_method(ciMethod *m)             { _method = m; }
  void set_optimized_virtual(bool f)       { _optimized_virtual = f; }
  bool is_optimized_virtual() const        { return _optimized_virtual; }
  void set_method_handle_invoke(bool f)    { _method_handle_invoke = f; }
  bool is_method_handle_invoke() const     { return _method_handle_invoke; }
  void set_override_symbolic_info(bool f)  { _override_symbolic_info = f; }
  bool override_symbolic_info() const      { return _override_symbolic_info; }
  void set_arg_escape(bool f)              { _arg_escape = f; }
  bool arg_escape() const                  { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
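// Example (illustrative sketch): querying a Java call during analysis.
// Assumes n has already been identified as a call node.
//
//   if (n->is_CallJava()) {
//     CallJavaNode* call = n->as_CallJava();
//     if (call->method() != NULL && !call->is_method_handle_invoke()) {
//       // direct or optimized-virtual call with a known target method
//     }
//   }
//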
//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);

public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (InlineTypeReturnedAsFields &&
        method != NULL &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf,addr,method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
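// Example (illustrative sketch): recognizing an uncommon trap. The trap
// request code is zero for ordinary static calls.
//
//   if (call->is_CallStaticJava()) {
//     int req = call->as_CallStaticJava()->uncommon_trap_request();
//     if (req != 0) {
//       // this call deoptimizes; the reason/action are encoded in req
//     }
//   }
//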
//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallNativeNode-----------------------------------
// Make a direct call into a foreign function with an arbitrary ABI.
// Acts as a safepoint only when a thread state transition is needed
// (see _need_transition).
class CallNativeNode : public CallNode {
  friend class MachCallNativeNode;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  static void print_regs(const GrowableArray<VMReg>& regs, outputStream* st);
public:
  GrowableArray<VMReg> _arg_regs;
  GrowableArray<VMReg> _ret_regs;
  const int _shadow_space_bytes;
  const bool _need_transition;

  CallNativeNode(const TypeFunc* tf, address addr, const char* name,
                 const TypePtr* adr_type,
                 const GrowableArray<VMReg>& arg_regs,
                 const GrowableArray<VMReg>& ret_regs,
                 int shadow_space_bytes,
                 bool need_transition)
    : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
      _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
      _need_transition(need_transition)
  {
    init_class_id(Class_CallNative);
    _name = name;
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return _need_transition; }
  virtual Node* match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
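// Example (illustrative sketch): leaf calls never act as safepoints, which
// matters when deciding whether JVM state must be preserved across a call.
//
//   if (n->is_Call() && !n->as_Call()->guaranteed_safepoint()) {
//     // e.g. a CallLeafNode: no debug info is needed at runtime for this call
//   }
//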
//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection). This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    InlineTypeNode,               // InlineTypeNode if this is an inline type allocation
    DefaultValue,                 // default value in case of non-flattened inline type array
    RawDefaultValue,              // same as above but as raw machine word
    ValidLengthTest,
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]       = TypeInt::POS;
    fields[KlassNode]       = TypeInstPtr::NOTNULL;
    fields[InitialTest]     = TypeInt::BOOL;
    fields[ALength]         = t; // length (can be a bad length)
    fields[InlineTypeNode]  = Type::BOTTOM;
    fields[DefaultValue]    = TypeInstPtr::NOTNULL;
    fields[RawDefaultValue] = TypeX_X;
    fields[ValidLengthTest] = TypeInt::BOOL;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;
  bool _larval;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               InlineTypeBaseNode* inline_type_node = NULL);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }
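  // Example (illustrative sketch): the enum above names this node's fixed
  // input edges, so allocation properties can be read positionally, e.g.:
  //
  //   Node* size  = alloc->in(AllocateNode::AllocSize);
  //   Node* klass = alloc->in(AllocateNode::KlassNode);
  //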
  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape is true
  // when the allocation's escape state is NoEscape or ArgEscape. If the
  // allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape state
  // is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and a memory barrier
  // is inserted at the exit of its <init>, the memory barrier for the new
  // is not necessary. Invoke this method when the MemBar at the exit of the
  // initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test,
                    Node* default_value, Node* raw_default_value)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,          count_val);
    set_req(AllocateNode::ValidLengthTest,  valid_length_test);
    init_req(AllocateNode::DefaultValue,    default_value);
    init_req(AllocateNode::RawDefaultValue, raw_default_value);
  }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};
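// Example (illustrative sketch): recovering the allocation that feeds a
// pointer, then reading its array length if it is an array allocation.
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
//   if (alloc != NULL) {
//     Node* length = alloc->Ideal_length();
//     // length is TOP if no length was recorded
//   }
//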
//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node* obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node* box_node() const       { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};
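// Example (illustrative sketch): checking how a lock was eliminated, e.g.
// when deciding whether its monitor state must still be materialized on
// deoptimization.
//
//   if (lock->is_eliminated()) {
//     if (lock->is_nested()) {
//       // enclosed by a matching lock on the same object; fully redundant
//     } else if (lock->is_coarsened() || lock->is_non_esc_obj()) {
//       // removed by lock coarsening or by escape analysis
//     }
//   }
//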
//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP