1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OPTO_CALLNODE_HPP
26 #define SHARE_OPTO_CALLNODE_HPP
27
28 #include "opto/connode.hpp"
29 #include "opto/mulnode.hpp"
30 #include "opto/multnode.hpp"
31 #include "opto/opcodes.hpp"
32 #include "opto/phaseX.hpp"
33 #include "opto/replacednodes.hpp"
34 #include "opto/type.hpp"
35 #include "utilities/growableArray.hpp"
36
37 // Portions of code courtesy of Clifford Click
38
39 // Optimization - Graph Style
40
41 class NamedCounter;
42 class MultiNode;
43 class SafePointNode;
44 class CallNode;
45 class CallJavaNode;
46 class CallStaticJavaNode;
47 class CallDynamicJavaNode;
48 class CallRuntimeNode;
49 class CallLeafNode;
50 class CallLeafNoFPNode;
51 class CallLeafVectorNode;
52 class AllocateNode;
53 class AllocateArrayNode;
54 class AbstractLockNode;
55 class LockNode;
56 class UnlockNode;
57 class FastLockNode;
58
59 //------------------------------StartNode--------------------------------------
60 // The method start node
61 class StartNode : public MultiNode {
62 virtual bool cmp( const Node &n ) const;
63 virtual uint size_of() const; // Size is bigger
64 public:
65 const TypeTuple *_domain;
66 StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
67 init_class_id(Class_Start);
68 init_req(0,this);
69 init_req(1,root);
70 }
71 virtual int Opcode() const;
72 virtual bool pinned() const { return true; };
73 virtual const Type *bottom_type() const;
74 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
75 virtual const Type* Value(PhaseGVN* phase) const;
76 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
77 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
78 virtual const RegMask &in_RegMask(uint) const;
79 virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
80 virtual uint ideal_reg() const { return 0; }
81 #ifndef PRODUCT
82 virtual void dump_spec(outputStream *st) const;
83 virtual void dump_compact_spec(outputStream *st) const;
84 #endif
85 };
86
87 //------------------------------StartOSRNode-----------------------------------
88 // The method start node for on stack replacement code
89 class StartOSRNode : public StartNode {
90 public:
91 StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
92 virtual int Opcode() const;
93 };
94
95
96 //------------------------------ParmNode---------------------------------------
97 // Incoming parameters
98 class ParmNode : public ProjNode {
99 static const char * const names[TypeFunc::Parms+1];
100 public:
101 ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
102 init_class_id(Class_Parm);
103 }
104 virtual int Opcode() const;
105 virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
106 virtual uint ideal_reg() const;
107 #ifndef PRODUCT
108 virtual void dump_spec(outputStream *st) const;
109 virtual void dump_compact_spec(outputStream *st) const;
110 #endif
111 };
112
113
114 //------------------------------ReturnNode-------------------------------------
115 // Return from subroutine node
116 class ReturnNode : public Node {
117 public:
118 ReturnNode(uint edges, Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr);
119 virtual int Opcode() const;
120 virtual bool is_CFG() const { return true; }
121 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
122 virtual bool depends_only_on_test() const { return false; }
123 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
124 virtual const Type* Value(PhaseGVN* phase) const;
125 virtual uint ideal_reg() const { return NotAMachineReg; }
126 virtual uint match_edge(uint idx) const;
127 #ifndef PRODUCT
128 virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
129 #endif
130 };
131
132
133 //------------------------------RethrowNode------------------------------------
134 // Rethrow of exception at call site. Ends a procedure before rethrowing;
135 // ends the current basic block like a ReturnNode. Restores registers and
136 // unwinds stack. Rethrow happens in the caller's method.
137 class RethrowNode : public Node {
138 public:
139 RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
140 virtual int Opcode() const;
141 virtual bool is_CFG() const { return true; }
142 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
143 virtual bool depends_only_on_test() const { return false; }
144 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
145 virtual const Type* Value(PhaseGVN* phase) const;
146 virtual uint match_edge(uint idx) const;
147 virtual uint ideal_reg() const { return NotAMachineReg; }
148 #ifndef PRODUCT
149 virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
150 #endif
151 };
152
153
154 //------------------------------ForwardExceptionNode---------------------------
155 // Pop stack frame and jump to StubRoutines::forward_exception_entry()
156 class ForwardExceptionNode : public ReturnNode {
157 public:
158 ForwardExceptionNode(Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr)
159 : ReturnNode(TypeFunc::Parms, cntrl, i_o, memory, frameptr, retadr) {
160 }
161
162 virtual int Opcode() const;
163 };
164
165 //------------------------------TailCallNode-----------------------------------
166 // Pop stack frame and jump indirect
167 class TailCallNode : public ReturnNode {
168 public:
169 TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
170 : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
171 init_req(TypeFunc::Parms, target);
172 init_req(TypeFunc::Parms+1, moop);
173 }
174
175 virtual int Opcode() const;
176 virtual uint match_edge(uint idx) const;
177 };
178
179 //------------------------------TailJumpNode-----------------------------------
180 // Pop stack frame and jump indirect
181 class TailJumpNode : public ReturnNode {
182 public:
183 TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
184 : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
185 init_req(TypeFunc::Parms, target);
186 init_req(TypeFunc::Parms+1, ex_oop);
187 }
188
189 virtual int Opcode() const;
190 virtual uint match_edge(uint idx) const;
191 };
192
193 //-------------------------------JVMState-------------------------------------
194 // A linked list of JVMState nodes captures the whole interpreter state,
195 // plus GC roots, for all active calls at some call site in this compilation
196 // unit. (If there is no inlining, then the list has exactly one link.)
197 // This provides a way to map the optimized program back into the interpreter,
198 // or to let the GC mark the stack.
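//
// Illustrative sketch (hypothetical variable names, not part of this header):
// walking an inlined scope chain from the innermost state out to the root:
//
//   for (JVMState* s = jvms; s != nullptr; s = s->caller()) {
//     if (s->has_method()) {
//       tty->print_cr("depth %u: %s @ bci %d",
//                     s->depth(), s->method()->name()->as_utf8(), s->bci());
//     }
//   }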
199 class JVMState : public ResourceObj {
200 public:
201 typedef enum {
202 Reexecute_Undefined = -1, // not defined -- will be translated into false later
203 Reexecute_False = 0, // false -- do not reexecute
204 Reexecute_True = 1 // true -- reexecute the bytecode
205 } ReexecuteState; //Reexecute State
206
207 private:
208 JVMState* _caller; // List pointer for forming scope chains
209 uint _depth; // One more than caller depth, or one.
210 uint _locoff; // Offset to locals in input edge mapping
211 uint _stkoff; // Offset to stack in input edge mapping
212 uint _monoff; // Offset to monitors in input edge mapping
213 uint _scloff; // Offset to fields of scalar objs in input edge mapping
214 uint _endoff; // Offset to end of input edge mapping
215 uint _sp; // Java Expression Stack Pointer for this state
216 int _bci; // Byte Code Index of this JVM point
  ReexecuteState     _reexecute;    // Whether this bytecode needs to be re-executed
218 ciMethod* _method; // Method Pointer
219 ciInstance* _receiver_info; // Constant receiver instance for compiled lambda forms
220 SafePointNode* _map; // Map node associated with this scope
221 public:
222 friend class Compile;
223 friend class PreserveReexecuteState;
224
225 // Because JVMState objects live over the entire lifetime of the
226 // Compile object, they are allocated into the comp_arena, which
227 // does not get resource marked or reset during the compile process
228 void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
229 void operator delete( void * ) { } // fast deallocation
230
231 // Create a new JVMState, ready for abstract interpretation.
232 JVMState(ciMethod* method, JVMState* caller);
233 JVMState(int stack_size); // root state; has a null method
234
235 // Access functions for the JVM
236 // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
237 // \ locoff \ stkoff \ argoff \ monoff \ scloff \ endoff
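  //
  // Illustrative sketch with hypothetical numbers: for a scope with 3 locals,
  // a max expression stack of 4, sp() == 2, one monitor and no scalarized
  // objects, starting at locoff() == 5, the offsets would line up as:
  //   locoff() == 5                // locals occupy input edges [5, 8)
  //   stkoff() == 8                // expression stack occupies edges [8, 12)
  //   argoff() == 10               // stkoff() + sp(): outgoing args start here
  //   monoff() == 12               // the (box, obj) pair occupies edges [12, 14)
  //   scloff() == endoff() == 14   // no scalarized object fields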
238 uint locoff() const { return _locoff; }
239 uint stkoff() const { return _stkoff; }
240 uint argoff() const { return _stkoff + _sp; }
241 uint monoff() const { return _monoff; }
242 uint scloff() const { return _scloff; }
243 uint endoff() const { return _endoff; }
244 uint oopoff() const { return debug_end(); }
245
246 int loc_size() const { return stkoff() - locoff(); }
247 int stk_size() const { return monoff() - stkoff(); }
248 int mon_size() const { return scloff() - monoff(); }
249 int scl_size() const { return endoff() - scloff(); }
250
251 bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
252 bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
253 bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
254 bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }
255
256 uint sp() const { return _sp; }
257 int bci() const { return _bci; }
258 bool should_reexecute() const { return _reexecute==Reexecute_True; }
259 bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
260 bool has_method() const { return _method != nullptr; }
261 ciMethod* method() const { assert(has_method(), ""); return _method; }
262 ciInstance* receiver_info() const { assert(has_method(), ""); return _receiver_info; }
263 JVMState* caller() const { return _caller; }
264 SafePointNode* map() const { return _map; }
265 uint depth() const { return _depth; }
266 uint debug_start() const; // returns locoff of root caller
267 uint debug_end() const; // returns endoff of self
268 uint debug_size() const {
269 return loc_size() + sp() + mon_size() + scl_size();
270 }
271 uint debug_depth() const; // returns sum of debug_size values at all depths
272
273 // Returns the JVM state at the desired depth (1 == root).
274 JVMState* of_depth(int d) const;
275
276 // Tells if two JVM states have the same call chain (depth, methods, & bcis).
277 bool same_calls_as(const JVMState* that) const;
278
  // Monitors (monitors are stored as (boxNode, objNode) pairs)
280 enum { logMonitorEdges = 1 };
281 int nof_monitors() const { return mon_size() >> logMonitorEdges; }
282 int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
283 int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
284 int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
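  // For example (hypothetical numbers): with monoff() == 12, monitor 0's box is
  // input edge 12 and its object edge 13; monitor 1's box is edge 14 and its
  // object edge 15.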
285 bool is_monitor_box(uint off) const {
286 assert(is_mon(off), "should be called only for monitor edge");
287 return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
288 }
289 bool is_monitor_use(uint off) const { return (is_mon(off)
290 && is_monitor_box(off))
291 || (caller() && caller()->is_monitor_use(off)); }
292
293 // Initialization functions for the JVM
294 void set_locoff(uint off) { _locoff = off; }
295 void set_stkoff(uint off) { _stkoff = off; }
296 void set_monoff(uint off) { _monoff = off; }
297 void set_scloff(uint off) { _scloff = off; }
298 void set_endoff(uint off) { _endoff = off; }
299 void set_offsets(uint off) {
300 _locoff = _stkoff = _monoff = _scloff = _endoff = off;
301 }
302 void set_map(SafePointNode* map) { _map = map; }
303 void bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
304 void set_sp(uint sp) { _sp = sp; }
305 // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci)      { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
307 void set_should_reexecute(bool reexec) {_reexecute = reexec ? Reexecute_True : Reexecute_False;}
308 void set_receiver_info(ciInstance* recv) { assert(has_method() || recv == nullptr, ""); _receiver_info = recv; }
309
310 // Miscellaneous utility functions
311 JVMState* clone_deep(Compile* C) const; // recursively clones caller chain
312 JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
313 void set_map_deep(SafePointNode *map);// reset map for all callers
314 void adapt_position(int delta); // Adapt offsets in in-array after adding an edge.
315 int interpreter_frame_size() const;
316 ciInstance* compute_receiver_info(ciMethod* callee) const;
317
318 #ifndef PRODUCT
319 void print_method_with_lineno(outputStream* st, bool show_name) const;
320 void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
321 void dump_spec(outputStream *st) const;
322 void dump_on(outputStream* st) const;
323 void dump() const {
324 dump_on(tty);
325 }
326 #endif
327 };
328
329 //------------------------------SafePointNode----------------------------------
330 // A SafePointNode is a subclass of a MultiNode for convenience (and
331 // potential code sharing) only - conceptually it is independent of
332 // the Node semantics.
333 class SafePointNode : public MultiNode {
334 friend JVMState;
335 friend class GraphKit;
336 friend class LibraryCallKit;
337
338 virtual bool cmp( const Node &n ) const;
339 virtual uint size_of() const; // Size is bigger
340
341 protected:
342 JVMState* const _jvms; // Pointer to list of JVM State objects
343 // Many calls take *all* of memory as input,
344 // but some produce a limited subset of that memory as output.
345 // The adr_type reports the call's behavior as a store, not a load.
346 const TypePtr* _adr_type; // What type of memory does this node produce?
  ReplacedNodes      _replaced_nodes;     // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
348 bool _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States
349
350 void set_jvms(JVMState* s) {
351 assert(s != nullptr, "assign null value to _jvms");
352 *(JVMState**)&_jvms = s; // override const attribute in the accessor
353 }
354 public:
355 SafePointNode(uint edges, JVMState* jvms,
356 // A plain safepoint advertises no memory effects (null):
357 const TypePtr* adr_type = nullptr)
358 : MultiNode( edges ),
359 _jvms(jvms),
360 _adr_type(adr_type),
361 _has_ea_local_in_scope(false)
362 {
363 init_class_id(Class_SafePoint);
364 }
365
366 JVMState* jvms() const { return _jvms; }
367 virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
368 void clone_jvms(Compile* C) {
369 if (jvms() != nullptr) {
370 if (needs_deep_clone_jvms(C)) {
371 set_jvms(jvms()->clone_deep(C));
372 jvms()->set_map_deep(this);
373 } else {
374 jvms()->clone_shallow(C)->bind_map(this);
375 }
376 }
377 }
378
379 private:
380 void verify_input(const JVMState* jvms, uint idx) const {
381 assert(verify_jvms(jvms), "jvms must match");
382 Node* n = in(idx);
383 assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
384 in(idx + 1)->is_top(), "2nd half of long/double");
385 }
386
387 public:
388 // Functionality from old debug nodes which has changed
389 Node* local(const JVMState* jvms, uint idx) const {
390 uint loc_idx = jvms->locoff() + idx;
391 assert(jvms->is_loc(loc_idx), "not a local slot");
392 verify_input(jvms, loc_idx);
393 return in(loc_idx);
394 }
395 Node* stack(const JVMState* jvms, uint idx) const {
396 uint stk_idx = jvms->stkoff() + idx;
397 assert(jvms->is_stk(stk_idx), "not a stack slot");
398 verify_input(jvms, stk_idx);
399 return in(stk_idx);
400 }
401 Node* argument(const JVMState* jvms, uint idx) const {
402 uint arg_idx = jvms->argoff() + idx;
403 assert(jvms->is_stk(arg_idx), "not an argument slot");
404 verify_input(jvms, arg_idx);
    return in(arg_idx);
406 }
407 Node* monitor_box(const JVMState* jvms, uint idx) const {
408 assert(verify_jvms(jvms), "jvms must match");
409 uint mon_box_idx = jvms->monitor_box_offset(idx);
410 assert(jvms->is_monitor_box(mon_box_idx), "not a monitor box offset");
411 return in(mon_box_idx);
412 }
413 Node* monitor_obj(const JVMState* jvms, uint idx) const {
414 assert(verify_jvms(jvms), "jvms must match");
415 uint mon_obj_idx = jvms->monitor_obj_offset(idx);
416 assert(jvms->is_mon(mon_obj_idx) && !jvms->is_monitor_box(mon_obj_idx), "not a monitor obj offset");
417 return in(mon_obj_idx);
418 }
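
  // Illustrative usage sketch (hypothetical variable names): iterating the
  // monitors of the innermost scope of a safepoint 'sfpt':
  //   for (int i = 0; i < sfpt->jvms()->nof_monitors(); i++) {
  //     Node* box = sfpt->monitor_box(sfpt->jvms(), i);
  //     Node* obj = sfpt->monitor_obj(sfpt->jvms(), i);
  //   }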
419
420 void set_local(const JVMState* jvms, uint idx, Node *c);
421
422 void set_stack(const JVMState* jvms, uint idx, Node *c) {
423 assert(verify_jvms(jvms), "jvms must match");
424 set_req(jvms->stkoff() + idx, c);
425 }
426 void set_argument(const JVMState* jvms, uint idx, Node *c) {
427 assert(verify_jvms(jvms), "jvms must match");
428 set_req(jvms->argoff() + idx, c);
429 }
430 void ensure_stack(JVMState* jvms, uint stk_size) {
431 assert(verify_jvms(jvms), "jvms must match");
432 int grow_by = (int)stk_size - (int)jvms->stk_size();
433 if (grow_by > 0) grow_stack(jvms, grow_by);
434 }
435 void grow_stack(JVMState* jvms, uint grow_by);
436 // Handle monitor stack
437 void push_monitor( const FastLockNode *lock );
438 void pop_monitor ();
439 Node *peek_monitor_box() const;
440 Node *peek_monitor_obj() const;
441 // Peek Operand Stacks, JVMS 2.6.2
442 Node* peek_operand(uint off = 0) const;
443
444 // Access functions for the JVM
445 Node *control () const { return in(TypeFunc::Control ); }
446 Node *i_o () const { return in(TypeFunc::I_O ); }
447 Node *memory () const { return in(TypeFunc::Memory ); }
448 Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
449 Node *frameptr () const { return in(TypeFunc::FramePtr ); }
450
451 void set_control ( Node *c ) { set_req(TypeFunc::Control,c); }
452 void set_i_o ( Node *c ) { set_req(TypeFunc::I_O ,c); }
453 void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }
454
455 MergeMemNode* merged_memory() const {
456 return in(TypeFunc::Memory)->as_MergeMem();
457 }
458
459 // The parser marks useless maps as dead when it's done with them:
460 bool is_killed() { return in(TypeFunc::Control) == nullptr; }
461
462 // Exception states bubbling out of subgraphs such as inlined calls
463 // are recorded here. (There might be more than one, hence the "next".)
464 // This feature is used only for safepoints which serve as "maps"
465 // for JVM states during parsing, intrinsic expansion, etc.
466 SafePointNode* next_exception() const;
467 void set_next_exception(SafePointNode* n);
468 bool has_exceptions() const { return next_exception() != nullptr; }
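  //
  // Illustrative sketch (hypothetical variable names): walking the exception
  // states recorded on a parser map 'map':
  //   for (SafePointNode* ex = map->next_exception(); ex != nullptr;
  //        ex = ex->next_exception()) {
  //     // each 'ex' carries the JVM state at which an exception escapes
  //   }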
469
470 // Helper methods to operate on replaced nodes
471 ReplacedNodes replaced_nodes() const {
472 return _replaced_nodes;
473 }
474
475 void set_replaced_nodes(ReplacedNodes replaced_nodes) {
476 _replaced_nodes = replaced_nodes;
477 }
478
479 void clone_replaced_nodes() {
480 _replaced_nodes.clone();
481 }
482 void record_replaced_node(Node* initial, Node* improved) {
483 _replaced_nodes.record(initial, improved);
484 }
485 void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
486 _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
487 }
488 void delete_replaced_nodes() {
489 _replaced_nodes.reset();
490 }
491 void apply_replaced_nodes(uint idx) {
492 _replaced_nodes.apply(this, idx);
493 }
494 void merge_replaced_nodes_with(SafePointNode* sfpt) {
495 _replaced_nodes.merge_with(sfpt->_replaced_nodes);
496 }
497 bool has_replaced_nodes() const {
498 return !_replaced_nodes.is_empty();
499 }
500 void set_has_ea_local_in_scope(bool b) {
501 _has_ea_local_in_scope = b;
502 }
503 bool has_ea_local_in_scope() const {
504 return _has_ea_local_in_scope;
505 }
506
507 void disconnect_from_root(PhaseIterGVN *igvn);
508
509 // Standard Node stuff
510 virtual int Opcode() const;
511 virtual bool pinned() const { return true; }
512 virtual const Type* Value(PhaseGVN* phase) const;
513 virtual const Type* bottom_type() const { return Type::CONTROL; }
514 virtual const TypePtr* adr_type() const { return _adr_type; }
515 void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
516 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
517 virtual Node* Identity(PhaseGVN* phase);
518 virtual uint ideal_reg() const { return 0; }
519 virtual const RegMask &in_RegMask(uint) const;
520 virtual const RegMask &out_RegMask() const;
521 virtual uint match_edge(uint idx) const;
522
523 #ifndef PRODUCT
524 virtual void dump_spec(outputStream *st) const;
525 #endif
526 };
527
528 //------------------------------SafePointScalarObjectNode----------------------
529 // A SafePointScalarObjectNode represents the state of a scalarized object
530 // at a safepoint.
531 class SafePointScalarObjectNode: public TypeNode {
532 uint _first_index; // First input edge relative index of a SafePoint node where
533 // states of the scalarized object fields are collected.
534 uint _depth; // Depth of the JVM state the _first_index field refers to
535 uint _n_fields; // Number of non-static fields of the scalarized object.
536
537 Node* _alloc; // Just for debugging purposes.
538
539 virtual uint hash() const;
540 virtual bool cmp( const Node &n ) const;
541
542 uint first_index() const { return _first_index; }
543
544 public:
545 SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields);
546
547 virtual int Opcode() const;
548 virtual uint ideal_reg() const;
549 virtual const RegMask &in_RegMask(uint) const;
550 virtual const RegMask &out_RegMask() const;
551 virtual uint match_edge(uint idx) const;
552
553 uint first_index(JVMState* jvms) const {
554 assert(jvms != nullptr, "missed JVMS");
555 return jvms->of_depth(_depth)->scloff() + _first_index;
556 }
557 uint n_fields() const { return _n_fields; }
558
559 #ifdef ASSERT
560 Node* alloc() const { return _alloc; }
561 #endif
562
563 virtual uint size_of() const { return sizeof(*this); }
564
565 // Assumes that "this" is an argument to a safepoint node "s", and that
566 // "new_call" is being created to correspond to "s". But the difference
567 // between the start index of the jvmstates of "new_call" and "s" is
568 // "jvms_adj". Produce and return a SafePointScalarObjectNode that
569 // corresponds appropriately to "this" in "new_call". Assumes that
570 // "sosn_map" is a map, specific to the translation of "s" to "new_call",
571 // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
572 SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
573
574 #ifndef PRODUCT
575 virtual void dump_spec(outputStream *st) const;
576 #endif
577 };
578
579 //------------------------------SafePointScalarMergeNode----------------------
580 //
581 // This class represents an allocation merge that is used as debug information
// and has had at least one of its inputs scalar replaced.
583 //
584 // The required inputs of this node, except the control, are pointers to
585 // SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge. The other properties of the class are described below.
587 //
588 // _merge_pointer_idx : index in the SafePointNode's input array where the
589 // description of the _allocation merge_ starts. The index is zero based and
590 // relative to the SafePoint's scloff. The two entries in the SafePointNode's
591 // input array starting at '_merge_pointer_idx` are Phi nodes representing:
592 //
// 1) The original merge Phi. During rematerialization this input will only be
// used if the "selector Phi" (see below) indicates that the execution of the
// Phi took the path of a non-scalarized input.
//
// 2) A "selector Phi". The output of this Phi will be '-1' if the execution
// of the method exercised a non-scalarized input of the original Phi.
// Otherwise, the output will be >= 0 and equal to the index, minus one, of the
// entry in the SafePointScalarMergeNode's input array that describes the
// scalarized object that should be used (i.e., input index == selector value + 1).
602 //
603 // As an example, consider a Phi merging 3 inputs, of which the last 2 are
604 // scalar replaceable.
605 //
606 // Phi(Region, NSR, SR, SR)
607 //
608 // During scalar replacement the SR inputs will be changed to null:
609 //
610 // Phi(Region, NSR, nullptr, nullptr)
611 //
612 // A corresponding selector Phi will be created with a configuration like this:
613 //
614 // Phi(Region, -1, 0, 1)
615 //
616 // During execution of the compiled method, if the execution reaches a Trap, the
617 // output of the selector Phi will tell if we need to rematerialize one of the
618 // scalar replaced inputs or if we should just use the pointer returned by the
619 // original Phi.
620
621 class SafePointScalarMergeNode: public TypeNode {
622 int _merge_pointer_idx; // This is the first input edge relative
623 // index of a SafePoint node where metadata information relative
624 // to restoring the merge is stored. The corresponding input
625 // in the associated SafePoint will point to a Phi representing
626 // potential non-scalar replaced objects.
627
628 virtual uint hash() const;
629 virtual bool cmp( const Node &n ) const;
630
631 public:
632 SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);
633
634 virtual int Opcode() const;
635 virtual uint ideal_reg() const;
636 virtual const RegMask &in_RegMask(uint) const;
637 virtual const RegMask &out_RegMask() const;
638 virtual uint match_edge(uint idx) const;
639
640 virtual uint size_of() const { return sizeof(*this); }
641
642 int merge_pointer_idx(JVMState* jvms) const {
643 assert(jvms != nullptr, "JVMS reference is null.");
644 return jvms->scloff() + _merge_pointer_idx;
645 }
646
647 int selector_idx(JVMState* jvms) const {
648 assert(jvms != nullptr, "JVMS reference is null.");
649 return jvms->scloff() + _merge_pointer_idx + 1;
650 }
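
  // Illustrative sketch (hypothetical variable names): given a SafePointNode
  // 'sfpt' whose debug info references this merge node, the two Phis described
  // above can be fetched as:
  //   // 'smn' is this SafePointScalarMergeNode, 'sfpt' the enclosing SafePointNode
  //   Node* merge_ptr = sfpt->in(smn->merge_pointer_idx(sfpt->jvms()));
  //   Node* selector  = sfpt->in(smn->selector_idx(sfpt->jvms()));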
651
652 // Assumes that "this" is an argument to a safepoint node "s", and that
653 // "new_call" is being created to correspond to "s". But the difference
654 // between the start index of the jvmstates of "new_call" and "s" is
// "jvms_adj". Produce and return a SafePointScalarMergeNode that
656 // corresponds appropriately to "this" in "new_call". Assumes that
657 // "sosn_map" is a map, specific to the translation of "s" to "new_call",
658 // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
659 SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
660
661 #ifndef PRODUCT
662 virtual void dump_spec(outputStream *st) const;
663 #endif
664 };
665
666 // Simple container for the outgoing projections of a call. Useful
667 // for serious surgery on calls.
668 class CallProjections {
669 public:
670 Node* fallthrough_proj;
671 Node* fallthrough_catchproj;
672 Node* fallthrough_memproj;
673 Node* fallthrough_ioproj;
674 Node* catchall_catchproj;
675 Node* catchall_memproj;
676 Node* catchall_ioproj;
677 Node* exobj;
678 uint nb_resproj;
679 Node* resproj[1]; // at least one projection
680
681 CallProjections(uint nbres) {
682 fallthrough_proj = nullptr;
683 fallthrough_catchproj = nullptr;
684 fallthrough_memproj = nullptr;
685 fallthrough_ioproj = nullptr;
686 catchall_catchproj = nullptr;
687 catchall_memproj = nullptr;
688 catchall_ioproj = nullptr;
689 exobj = nullptr;
690 nb_resproj = nbres;
691 resproj[0] = nullptr;
692 for (uint i = 1; i < nb_resproj; i++) {
693 resproj[i] = nullptr;
694 }
695 }
696
697 };
698
699 class CallGenerator;
700
701 //------------------------------CallNode---------------------------------------
702 // Call nodes now subsume the function of debug nodes at callsites, so they
703 // contain the functionality of a full scope chain of debug nodes.
704 class CallNode : public SafePointNode {
705
706 protected:
707 bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
708
709 public:
710 const TypeFunc* _tf; // Function type
711 address _entry_point; // Address of method being called
712 float _cnt; // Estimate of number of times called
713 CallGenerator* _generator; // corresponding CallGenerator for some late inline calls
714 const char* _name; // Printable name, if _method is null
715
716 CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
717 : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
718 _tf(tf),
719 _entry_point(addr),
720 _cnt(COUNT_UNKNOWN),
721 _generator(nullptr),
722 _name(nullptr)
723 {
724 init_class_id(Class_Call);
725 }
726
727 const TypeFunc* tf() const { return _tf; }
728 address entry_point() const { return _entry_point; }
729 float cnt() const { return _cnt; }
730 CallGenerator* generator() const { return _generator; }
731
732 void set_tf(const TypeFunc* tf) { _tf = tf; }
733 void set_entry_point(address p) { _entry_point = p; }
734 void set_cnt(float c) { _cnt = c; }
735 void set_generator(CallGenerator* cg) { _generator = cg; }
736
737 virtual const Type* bottom_type() const;
738 virtual const Type* Value(PhaseGVN* phase) const;
739 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
740 virtual Node* Identity(PhaseGVN* phase) { return this; }
741 virtual bool cmp(const Node &n) const;
742 virtual uint size_of() const = 0;
743 virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
744 virtual Node* match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
745 virtual uint ideal_reg() const { return NotAMachineReg; }
746 // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
747 // for some macro nodes whose expansion does not have a safepoint on the fast path.
748 virtual bool guaranteed_safepoint() { return true; }
749 // For macro nodes, the JVMState gets modified during expansion. If calls
750 // use MachConstantBase, it gets modified during matching. If the call is
751 // late inlined, it also needs the full JVMState. So when cloning the
752 // node the JVMState must be deep cloned. Default is to shallow clone.
753 virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }
754
755 // Returns true if the call may modify n
756 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
757 // Does this node have a use of n other than in debug information?
758 bool has_non_debug_use(Node* n);
759 bool has_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or null if there is none.
763 Node* result_cast();
  // Does this node return a pointer?
765 bool returns_pointer() const {
766 const TypeTuple* r = tf()->range_sig();
767 return (!tf()->returns_inline_type_as_fields() &&
768 r->cnt() > TypeFunc::Parms &&
769 r->field_at(TypeFunc::Parms)->isa_ptr());
770 }
771
772 // Collect all the interesting edges from a call for use in
773 // replacing the call by something else. Used by macro expansion
774 // and the late inlining support.
775 CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true) const;
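
  // Illustrative usage sketch (hypothetical variable names), e.g. when a call
  // is being replaced during macro expansion or late inlining:
  //   CallProjections* projs = call->extract_projections(false /*separate_io_proj*/);
  //   if (projs->fallthrough_catchproj != nullptr) {
  //     // rewire users of the normal control path here
  //   }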
776
777 virtual uint match_edge(uint idx) const;
778
779 bool is_call_to_arraycopystub() const;
780 bool is_call_to_multianewarray_stub() const;
781
782 virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
783
784 #ifndef PRODUCT
785 virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
786 virtual void dump_spec(outputStream* st) const;
787 #endif
788 };
789
790
791 //------------------------------CallJavaNode-----------------------------------
792 // Make a static or dynamic subroutine call node using Java calling
793 // convention. (The "Java" calling convention is the compiler's calling
794 // convention, as opposed to the interpreter's or that of native C.)
795 class CallJavaNode : public CallNode {
796 protected:
797 virtual bool cmp( const Node &n ) const;
798 virtual uint size_of() const; // Size is bigger
799
800 ciMethod* _method; // Method being direct called
801 bool _optimized_virtual;
802 bool _override_symbolic_info; // Override symbolic call site info from bytecode
803 bool _arg_escape; // ArgEscape in parameter list
804 public:
805 CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method)
806 : CallNode(tf, addr, TypePtr::BOTTOM),
807 _method(method),
808 _optimized_virtual(false),
809 _override_symbolic_info(false),
810 _arg_escape(false)
811 {
812 init_class_id(Class_CallJava);
813 }
814
815 virtual int Opcode() const;
816 ciMethod* method() const { return _method; }
817 void set_method(ciMethod *m) { _method = m; }
818 void set_optimized_virtual(bool f) { _optimized_virtual = f; }
819 bool is_optimized_virtual() const { return _optimized_virtual; }
820 void set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
821 bool override_symbolic_info() const { return _override_symbolic_info; }
822 void set_arg_escape(bool f) { _arg_escape = f; }
823 bool arg_escape() const { return _arg_escape; }
824 void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
825 void register_for_late_inline();
826
827 DEBUG_ONLY( bool validate_symbolic_info() const; )
828
829 #ifndef PRODUCT
830 virtual void dump_spec(outputStream *st) const;
831 virtual void dump_compact_spec(outputStream *st) const;
832 #endif
833 };
834
835 //------------------------------CallStaticJavaNode-----------------------------
836 // Make a direct subroutine call using Java calling convention (for static
837 // calls and optimized virtual calls, plus calls to wrappers for run-time
838 // routines); generates static stub.
839 class CallStaticJavaNode : public CallJavaNode {
840 virtual bool cmp( const Node &n ) const;
841 virtual uint size_of() const; // Size is bigger
842
843 bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
844
845 public:
846 CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
847 : CallJavaNode(tf, addr, method) {
848 init_class_id(Class_CallStaticJava);
849 if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
850 init_flags(Flag_is_macro);
851 C->add_macro_node(this);
852 }
853 const TypeTuple *r = tf->range_sig();
854 if (InlineTypeReturnedAsFields &&
855 method != nullptr &&
856 method->is_method_handle_intrinsic() &&
857 r->cnt() > TypeFunc::Parms &&
858 r->field_at(TypeFunc::Parms)->isa_oopptr() &&
859 r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
860 // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
861 init_flags(Flag_is_macro);
862 C->add_macro_node(this);
863 }
864 }
865 CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
866 : CallJavaNode(tf, addr, nullptr) {
867 init_class_id(Class_CallStaticJava);
868 // This node calls a runtime stub, which often has narrow memory effects.
869 _adr_type = adr_type;
870 _name = name;
871 }
872
873 // If this is an uncommon trap, return the request code, else zero.
874 int uncommon_trap_request() const;
875 bool is_uncommon_trap() const;
876 static int extract_uncommon_trap_request(const Node* call);
877
878 bool is_boxing_method() const {
879 return is_macro() && (method() != nullptr) && method()->is_boxing_method();
880 }
881 // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
883 virtual bool needs_deep_clone_jvms(Compile* C) {
884 return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
885 }
886
887 virtual int Opcode() const;
888 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
889
890 #ifndef PRODUCT
891 virtual void dump_spec(outputStream *st) const;
892 virtual void dump_compact_spec(outputStream *st) const;
893 #endif
894 };
895
896 //------------------------------CallDynamicJavaNode----------------------------
897 // Make a dispatched call using Java calling convention.
898 class CallDynamicJavaNode : public CallJavaNode {
899 virtual bool cmp( const Node &n ) const;
900 virtual uint size_of() const; // Size is bigger
901 public:
902 CallDynamicJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int vtable_index)
903 : CallJavaNode(tf,addr,method), _vtable_index(vtable_index) {
904 init_class_id(Class_CallDynamicJava);
905 }
906
907 // Late inlining modifies the JVMState, so we need to deep clone it
908 // when the call node is cloned.
909 virtual bool needs_deep_clone_jvms(Compile* C) {
910 return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
911 }
912
913 int _vtable_index;
914 virtual int Opcode() const;
915 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
916 #ifndef PRODUCT
917 virtual void dump_spec(outputStream *st) const;
918 #endif
919 };
920
921 //------------------------------CallRuntimeNode--------------------------------
922 // Make a direct subroutine call node into compiled C++ code.
923 class CallRuntimeNode : public CallNode {
924 protected:
925 virtual bool cmp( const Node &n ) const;
926 virtual uint size_of() const; // Size is bigger
927 public:
928 CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
929 const TypePtr* adr_type, JVMState* jvms = nullptr)
930 : CallNode(tf, addr, adr_type, jvms)
931 {
932 init_class_id(Class_CallRuntime);
933 _name = name;
934 }
935
936 virtual int Opcode() const;
937 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
938
939 #ifndef PRODUCT
940 virtual void dump_spec(outputStream *st) const;
941 #endif
942 };
943
944 //------------------------------CallLeafNode-----------------------------------
945 // Make a direct subroutine call node into compiled C++ code, without
946 // safepoints
947 class CallLeafNode : public CallRuntimeNode {
948 public:
949 CallLeafNode(const TypeFunc* tf, address addr, const char* name,
950 const TypePtr* adr_type)
951 : CallRuntimeNode(tf, addr, name, adr_type)
952 {
953 init_class_id(Class_CallLeaf);
954 }
955 virtual int Opcode() const;
956 virtual bool guaranteed_safepoint() { return false; }
957 #ifndef PRODUCT
958 virtual void dump_spec(outputStream *st) const;
959 #endif
960 };
961
/* A pure function call: it is assumed not to be a safepoint, not to read or write memory,
 * and not to throw exceptions. It just takes parameters and returns a value, without side
 * effects. It is always correct to create such calls, or to remove them if the result is not used.
 *
 * They still have a control input to allow easy lowering into other kinds of calls that require
 * a control, but this is more a technical than a moral constraint.
 *
 * Pure calls must have only control and data inputs and outputs: I/O, Memory and so on must be top.
 * Nevertheless, pure calls can typically be expensive math operations, so care must be taken
 * when letting the node float.
972 */
973 class CallLeafPureNode : public CallLeafNode {
974 protected:
975 bool is_unused() const;
976 bool is_dead() const;
977 TupleNode* make_tuple_of_input_state_and_top_return_values(const Compile* C) const;
978
979 public:
980 CallLeafPureNode(const TypeFunc* tf, address addr, const char* name)
981 : CallLeafNode(tf, addr, name, nullptr) {
982 init_class_id(Class_CallLeafPure);
983 }
984 int Opcode() const override;
985 Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
986 };
987
988 //------------------------------CallLeafNoFPNode-------------------------------
989 // CallLeafNode, not using floating point or using it in the same manner as
990 // the generated code
991 class CallLeafNoFPNode : public CallLeafNode {
992 public:
993 CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
994 const TypePtr* adr_type)
995 : CallLeafNode(tf, addr, name, adr_type)
996 {
997 init_class_id(Class_CallLeafNoFP);
998 }
999 virtual int Opcode() const;
1000 virtual uint match_edge(uint idx) const;
1001 };
1002
1003 //------------------------------CallLeafVectorNode-------------------------------
1004 // CallLeafNode but calling with vector calling convention instead.
1005 class CallLeafVectorNode : public CallLeafNode {
1006 private:
1007 uint _num_bits;
1008 protected:
1009 virtual bool cmp( const Node &n ) const;
1010 virtual uint size_of() const; // Size is bigger
1011 public:
1012 CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
1013 const TypePtr* adr_type, uint num_bits)
1014 : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
1015 {
1016 }
1017 virtual int Opcode() const;
1018 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
1019 };
1020
1021
1022 //------------------------------Allocate---------------------------------------
1023 // High-level memory allocation
1024 //
1025 // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
1026 // get expanded into a code sequence containing a call. Unlike other CallNodes,
1027 // they have 2 memory projections and 2 i_o projections (which are distinguished by
1028 // the _is_io_use flag in the projection.) This is needed when expanding the node in
1029 // order to differentiate the uses of the projection on the normal control path from
1030 // those on the exception return path.
1031 //
1032 class AllocateNode : public CallNode {
1033 public:
1034 enum {
1035 // Output:
1036 RawAddress = TypeFunc::Parms, // the newly-allocated raw address
1037 // Inputs:
1038 AllocSize = TypeFunc::Parms, // size (in bytes) of the new object
1039 KlassNode, // type (maybe dynamic) of the obj.
1040 InitialTest, // slow-path test (may be constant)
1041 ALength, // array length (or TOP if none)
1042 ValidLengthTest,
1043 InlineType, // InlineTypeNode if this is an inline type allocation
1044 InitValue, // Init value for null-free inline type arrays
1045 RawInitValue, // Same as above but as raw machine word
1046 ParmLimit
1047 };
1048
1049 static const TypeFunc* alloc_type(const Type* t) {
1050 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1051 fields[AllocSize] = TypeInt::POS;
1052 fields[KlassNode] = TypeInstPtr::NOTNULL;
1053 fields[InitialTest] = TypeInt::BOOL;
1054 fields[ALength] = t; // length (can be a bad length)
1055 fields[ValidLengthTest] = TypeInt::BOOL;
1056 fields[InlineType] = Type::BOTTOM;
1057 fields[InitValue] = TypeInstPtr::NOTNULL;
1058 fields[RawInitValue] = TypeX_X;
1059
1060 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1061
1062 // create result type (range)
1063 fields = TypeTuple::fields(1);
1064 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1065
1066 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1067
1068 return TypeFunc::make(domain, range);
1069 }
1070
1071 // Result of Escape Analysis
1072 bool _is_scalar_replaceable;
1073 bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
1075 bool _is_allocation_MemBar_redundant;
1076 bool _larval;
1077
1078 virtual uint size_of() const; // Size is bigger
1079 AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1080 Node *size, Node *klass_node, Node *initial_test,
1081 InlineTypeNode* inline_type_node = nullptr);
1082 // Expansion modifies the JVMState, so we need to deep clone it
1083 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1084 virtual int Opcode() const;
1085 virtual uint ideal_reg() const { return Op_RegP; }
1086 virtual bool guaranteed_safepoint() { return false; }
1087
1088 // allocations do not modify their arguments
1089 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1090
1091 // Pattern-match a possible usage of AllocateNode.
1092 // Return null if no allocation is recognized.
1093 // The operand is the pointer produced by the (possible) allocation.
1094 // It must be a projection of the Allocate or its subsequent CastPP.
1095 // (Note: This function is defined in file graphKit.cpp, near
1096 // GraphKit::new_instance/new_array, whose output it recognizes.)
1097 // The 'ptr' may not have an offset unless the 'offset' argument is given.
1098 static AllocateNode* Ideal_allocation(Node* ptr);
1099
1100 // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1101 // an offset, which is reported back to the caller.
1102 // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
1103 static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
1104 intptr_t& offset);
1105
1106 // Dig the klass operand out of a (possible) allocation site.
1107 static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
1108 AllocateNode* allo = Ideal_allocation(ptr);
1109 return (allo == nullptr) ? nullptr : allo->in(KlassNode);
1110 }
1111
1112 // Conservatively small estimate of offset of first non-header byte.
1113 int minimum_header_size() {
1114 return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
1115 instanceOopDesc::base_offset_in_bytes();
1116 }
1117
1118 // Return the corresponding initialization barrier (or null if none).
1119 // Walks out edges to find it...
1120 // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1121 // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1122 InitializeNode* initialization();
1123
1124 // Convenience for initialization->maybe_set_complete(phase)
1125 bool maybe_set_complete(PhaseGVN* phase);
1126
  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape
  // is true when its allocation's escape state is NoEscape or
  // ArgEscape. In case the allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag.
  // AllocateNode._is_non_escaping is true when its escape state is
  // NoEscape.
1134 bool does_not_escape_thread() {
1135 InitializeNode* init = nullptr;
1136 return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1137 }
1138
  // If the object doesn't escape in its <init> method and there is a memory barrier
  // inserted at the exit of its <init>, the memory barrier for the new is not necessary.
  // Invoke this method when the MemBar at the exit of the initializer post-dominates
  // the allocation node.
1143 void compute_MemBar_redundancy(ciMethod* initializer);
1144 bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1145
1146 Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1147
1148 NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1149 };
1150
1151 //------------------------------AllocateArray---------------------------------
1152 //
1153 // High-level array allocation
1154 //
1155 class AllocateArrayNode : public AllocateNode {
1156 public:
1157 AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1158 Node* initial_test, Node* count_val, Node* valid_length_test,
1159 Node* init_value, Node* raw_init_value)
1160 : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1161 initial_test)
1162 {
1163 init_class_id(Class_AllocateArray);
1164 set_req(AllocateNode::ALength, count_val);
1165 set_req(AllocateNode::ValidLengthTest, valid_length_test);
1166 init_req(AllocateNode::InitValue, init_value);
1167 init_req(AllocateNode::RawInitValue, raw_init_value);
1168 }
1169 virtual uint size_of() const { return sizeof(*this); }
1170 virtual int Opcode() const;
1171
  // Dig the length operand out of an array allocation site.
1173 Node* Ideal_length() {
1174 return in(AllocateNode::ALength);
1175 }
1176
  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
1179 Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1180
1181 // Pattern-match a possible usage of AllocateArrayNode.
1182 // Return null if no allocation is recognized.
1183 static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1184 AllocateNode* allo = Ideal_allocation(ptr);
1185 return (allo == nullptr || !allo->is_AllocateArray())
1186 ? nullptr : allo->as_AllocateArray();
1187 }
1188 };
1189
1190 //------------------------------AbstractLockNode-----------------------------------
1191 class AbstractLockNode: public CallNode {
1192 private:
1193 enum {
1194 Regular = 0, // Normal lock
1195 NonEscObj, // Lock is used for non escaping object
1196 Coarsened, // Lock was coarsened
1197 Nested // Nested lock
1198 } _kind;
1199
1200 static const char* _kind_names[Nested+1];
1201
1202 #ifndef PRODUCT
1203 NamedCounter* _counter;
1204 #endif
1205
1206 protected:
1207 // helper functions for lock elimination
1208 //
1209
1210 bool find_matching_unlock(const Node* ctrl, LockNode* lock,
1211 GrowableArray<AbstractLockNode*> &lock_ops);
1212 bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
1213 GrowableArray<AbstractLockNode*> &lock_ops);
1214 bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1215 GrowableArray<AbstractLockNode*> &lock_ops);
1216 LockNode *find_matching_lock(UnlockNode* unlock);
1217
1218 // Update the counter to indicate that this lock was eliminated.
1219 void set_eliminated_lock_counter() PRODUCT_RETURN;
1220
1221 public:
1222 AbstractLockNode(const TypeFunc *tf)
1223 : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
1224 _kind(Regular)
1225 {
1226 #ifndef PRODUCT
1227 _counter = nullptr;
1228 #endif
1229 }
1230 virtual int Opcode() const = 0;
1231 Node * obj_node() const {return in(TypeFunc::Parms + 0); }
1232 Node * box_node() const {return in(TypeFunc::Parms + 1); }
1233 Node * fastlock_node() const {return in(TypeFunc::Parms + 2); }
1234 void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }
1235
1236 const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
1237
1238 virtual uint size_of() const { return sizeof(*this); }
1239
1240 bool is_eliminated() const { return (_kind != Regular); }
1241 bool is_non_esc_obj() const { return (_kind == NonEscObj); }
1242 bool is_coarsened() const { return (_kind == Coarsened); }
1243 bool is_nested() const { return (_kind == Nested); }
1244
1245 const char * kind_as_string() const;
1246 void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const;
1247
1248 void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
1249 void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }
1250 void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }
1251
1252 // Check that all locks/unlocks associated with object come from balanced regions.
1253 // They can become unbalanced after coarsening optimization or on OSR entry.
1254 bool is_balanced();
1255
1256 // locking does not modify its arguments
1257 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase){ return false; }
1258
1259 #ifndef PRODUCT
1260 void create_lock_counter(JVMState* s);
1261 NamedCounter* counter() const { return _counter; }
1262 virtual void dump_spec(outputStream* st) const;
1263 virtual void dump_compact_spec(outputStream* st) const;
1264 #endif
1265 };
1266
1267 //------------------------------Lock---------------------------------------
1268 // High-level lock operation
1269 //
1270 // This is a subclass of CallNode because it is a macro node which gets expanded
1271 // into a code sequence containing a call. This node takes 3 "parameters":
1272 // 0 - object to lock
1273 // 1 - a BoxLockNode
1274 // 2 - a FastLockNode
1275 //
1276 class LockNode : public AbstractLockNode {
1277 static const TypeFunc* _lock_type_Type;
1278 public:
1279
1280 static inline const TypeFunc* lock_type() {
1281 assert(_lock_type_Type != nullptr, "should be initialized");
1282 return _lock_type_Type;
1283 }
1284
1285 static void initialize_lock_Type() {
1286 assert(_lock_type_Type == nullptr, "should be called once");
1287 // create input type (domain)
1288 const Type **fields = TypeTuple::fields(3);
1289 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
1290 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
1291 fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock
1292 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1293
1294 // create result type (range)
1295 fields = TypeTuple::fields(0);
1296
1297 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1298
1299 _lock_type_Type = TypeFunc::make(domain,range);
1300 }
1301
1302 virtual int Opcode() const;
1303 virtual uint size_of() const; // Size is bigger
1304 LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1305 init_class_id(Class_Lock);
1306 init_flags(Flag_is_macro);
1307 C->add_macro_node(this);
1308 }
1309 virtual bool guaranteed_safepoint() { return false; }
1310
1311 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1312 // Expansion modifies the JVMState, so we need to deep clone it
1313 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1314
1315 bool is_nested_lock_region(); // Is this Lock nested?
1316 bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1317 };
1318
1319 //------------------------------Unlock---------------------------------------
1320 // High-level unlock operation
1321 class UnlockNode : public AbstractLockNode {
1322 private:
1323 #ifdef ASSERT
1324 JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
1325 #endif
1326 public:
1327 virtual int Opcode() const;
1328 virtual uint size_of() const; // Size is bigger
1329 UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
1330 #ifdef ASSERT
1331 , _dbg_jvms(nullptr)
1332 #endif
1333 {
1334 init_class_id(Class_Unlock);
1335 init_flags(Flag_is_macro);
1336 C->add_macro_node(this);
1337 }
1338 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1339 // unlock is never a safepoint
1340 virtual bool guaranteed_safepoint() { return false; }
1341 #ifdef ASSERT
1342 void set_dbg_jvms(JVMState* s) {
1343 *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
1344 }
1345 JVMState* dbg_jvms() const { return _dbg_jvms; }
1346 #else
1347 JVMState* dbg_jvms() const { return nullptr; }
1348 #endif
1349 };
1350 #endif // SHARE_OPTO_CALLNODE_HPP