1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OPTO_CALLNODE_HPP
26 #define SHARE_OPTO_CALLNODE_HPP
27
28 #include "opto/connode.hpp"
29 #include "opto/mulnode.hpp"
30 #include "opto/multnode.hpp"
31 #include "opto/opcodes.hpp"
32 #include "opto/phaseX.hpp"
33 #include "opto/replacednodes.hpp"
34 #include "opto/type.hpp"
35 #include "utilities/growableArray.hpp"
36
37 // Portions of code courtesy of Clifford Click
38
39 // Optimization - Graph Style
40
41 class NamedCounter;
42 class MultiNode;
43 class SafePointNode;
44 class CallNode;
45 class CallJavaNode;
46 class CallStaticJavaNode;
47 class CallDynamicJavaNode;
48 class CallRuntimeNode;
49 class CallLeafNode;
50 class CallLeafNoFPNode;
51 class CallLeafVectorNode;
52 class AllocateNode;
53 class AllocateArrayNode;
54 class AbstractLockNode;
55 class LockNode;
56 class UnlockNode;
57 class FastLockNode;
58
59 //------------------------------StartNode--------------------------------------
60 // The method start node
61 class StartNode : public MultiNode {
62 virtual bool cmp( const Node &n ) const;
63 virtual uint size_of() const; // Size is bigger
64 public:
65 const TypeTuple *_domain;
66 StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
67 init_class_id(Class_Start);
68 init_req(0,this);
69 init_req(1,root);
70 }
71 virtual int Opcode() const;
  virtual bool pinned() const { return true; }
73 virtual const Type *bottom_type() const;
74 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
75 virtual const Type* Value(PhaseGVN* phase) const;
76 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
77 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
78 virtual const RegMask &in_RegMask(uint) const;
79 virtual Node *match( const ProjNode *proj, const Matcher *m );
80 virtual uint ideal_reg() const { return 0; }
81 #ifndef PRODUCT
82 virtual void dump_spec(outputStream *st) const;
83 virtual void dump_compact_spec(outputStream *st) const;
84 #endif
85 };
86
87 //------------------------------StartOSRNode-----------------------------------
88 // The method start node for on stack replacement code
89 class StartOSRNode : public StartNode {
90 public:
91 StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
92 virtual int Opcode() const;
93 static const TypeTuple *osr_domain();
94 };
95
96
97 //------------------------------ParmNode---------------------------------------
98 // Incoming parameters
99 class ParmNode : public ProjNode {
100 static const char * const names[TypeFunc::Parms+1];
101 public:
102 ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
103 init_class_id(Class_Parm);
104 }
105 virtual int Opcode() const;
106 virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
107 virtual uint ideal_reg() const;
108 #ifndef PRODUCT
109 virtual void dump_spec(outputStream *st) const;
110 virtual void dump_compact_spec(outputStream *st) const;
111 #endif
112 };
113
114
115 //------------------------------ReturnNode-------------------------------------
116 // Return from subroutine node
117 class ReturnNode : public Node {
118 public:
119 ReturnNode(uint edges, Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr);
120 virtual int Opcode() const;
121 virtual bool is_CFG() const { return true; }
122 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
123 virtual bool depends_only_on_test() const { return false; }
124 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
125 virtual const Type* Value(PhaseGVN* phase) const;
126 virtual uint ideal_reg() const { return NotAMachineReg; }
127 virtual uint match_edge(uint idx) const;
128 #ifndef PRODUCT
129 virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
130 #endif
131 };
132
133
134 //------------------------------RethrowNode------------------------------------
135 // Rethrow of exception at call site. Ends a procedure before rethrowing;
136 // ends the current basic block like a ReturnNode. Restores registers and
137 // unwinds stack. Rethrow happens in the caller's method.
138 class RethrowNode : public Node {
139 public:
140 RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
141 virtual int Opcode() const;
142 virtual bool is_CFG() const { return true; }
143 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
144 virtual bool depends_only_on_test() const { return false; }
145 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
146 virtual const Type* Value(PhaseGVN* phase) const;
147 virtual uint match_edge(uint idx) const;
148 virtual uint ideal_reg() const { return NotAMachineReg; }
149 #ifndef PRODUCT
150 virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
151 #endif
152 };
153
154
155 //------------------------------ForwardExceptionNode---------------------------
156 // Pop stack frame and jump to StubRoutines::forward_exception_entry()
157 class ForwardExceptionNode : public ReturnNode {
158 public:
159 ForwardExceptionNode(Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr)
160 : ReturnNode(TypeFunc::Parms, cntrl, i_o, memory, frameptr, retadr) {
161 }
162
163 virtual int Opcode() const;
164 };
165
166 //------------------------------TailCallNode-----------------------------------
167 // Pop stack frame and jump indirect
168 class TailCallNode : public ReturnNode {
169 public:
170 TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
171 : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
172 init_req(TypeFunc::Parms, target);
173 init_req(TypeFunc::Parms+1, moop);
174 }
175
176 virtual int Opcode() const;
177 virtual uint match_edge(uint idx) const;
178 };
179
180 //------------------------------TailJumpNode-----------------------------------
181 // Pop stack frame and jump indirect
182 class TailJumpNode : public ReturnNode {
183 public:
184 TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
185 : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
186 init_req(TypeFunc::Parms, target);
187 init_req(TypeFunc::Parms+1, ex_oop);
188 }
189
190 virtual int Opcode() const;
191 virtual uint match_edge(uint idx) const;
192 };
193
194 //-------------------------------JVMState-------------------------------------
195 // A linked list of JVMState nodes captures the whole interpreter state,
196 // plus GC roots, for all active calls at some call site in this compilation
197 // unit. (If there is no inlining, then the list has exactly one link.)
198 // This provides a way to map the optimized program back into the interpreter,
199 // or to let the GC mark the stack.
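//
// For example (a sketch of the structure described above): if method A calls
// method B and B is inlined, then a JVMState captured at a safepoint inside B
// is a two-link chain:
//
//   jvms            -- depth 2, method B, bci of the safepoint in B
//   jvms->caller()  -- depth 1, method A, bci of the call site of B in A
//
// Each link records its own offsets for locals, stack and monitors in the map.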
200 class JVMState : public ResourceObj {
201 public:
202 typedef enum {
203 Reexecute_Undefined = -1, // not defined -- will be translated into false later
204 Reexecute_False = 0, // false -- do not reexecute
205 Reexecute_True = 1 // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state
207
208 private:
209 JVMState* _caller; // List pointer for forming scope chains
210 uint _depth; // One more than caller depth, or one.
211 uint _locoff; // Offset to locals in input edge mapping
212 uint _stkoff; // Offset to stack in input edge mapping
213 uint _monoff; // Offset to monitors in input edge mapping
214 uint _scloff; // Offset to fields of scalar objs in input edge mapping
215 uint _endoff; // Offset to end of input edge mapping
216 uint _sp; // Java Expression Stack Pointer for this state
217 int _bci; // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
219 ciMethod* _method; // Method Pointer
220 ciInstance* _receiver_info; // Constant receiver instance for compiled lambda forms
221 SafePointNode* _map; // Map node associated with this scope
222 public:
223 friend class Compile;
224 friend class PreserveReexecuteState;
225
226 // Because JVMState objects live over the entire lifetime of the
227 // Compile object, they are allocated into the comp_arena, which
228 // does not get resource marked or reset during the compile process
229 void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
230 void operator delete( void * ) { } // fast deallocation
231
232 // Create a new JVMState, ready for abstract interpretation.
233 JVMState(ciMethod* method, JVMState* caller);
234 JVMState(int stack_size); // root state; has a null method
235
236 // Access functions for the JVM
237 // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
238 // \ locoff \ stkoff \ argoff \ monoff \ scloff \ endoff
239 uint locoff() const { return _locoff; }
240 uint stkoff() const { return _stkoff; }
241 uint argoff() const { return _stkoff + _sp; }
242 uint monoff() const { return _monoff; }
243 uint scloff() const { return _scloff; }
244 uint endoff() const { return _endoff; }
245 uint oopoff() const { return debug_end(); }
246
247 int loc_size() const { return stkoff() - locoff(); }
248 int stk_size() const { return monoff() - stkoff(); }
249 int mon_size() const { return scloff() - monoff(); }
250 int scl_size() const { return endoff() - scloff(); }
251
252 bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
253 bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
254 bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
255 bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }
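
  // A worked example with hypothetical offsets: for a state with 3 locals, a
  // max stack of 4, one monitor and no scalarized objects, locoff() == 5 gives
  // stkoff() == 8, monoff() == 12, scloff() == 14 and endoff() == 14; if
  // sp() == 2, then argoff() == 10.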
256
257 uint sp() const { return _sp; }
258 int bci() const { return _bci; }
259 bool should_reexecute() const { return _reexecute==Reexecute_True; }
260 bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
261 bool has_method() const { return _method != nullptr; }
262 ciMethod* method() const { assert(has_method(), ""); return _method; }
263 ciInstance* receiver_info() const { assert(has_method(), ""); return _receiver_info; }
264 JVMState* caller() const { return _caller; }
265 SafePointNode* map() const { return _map; }
266 uint depth() const { return _depth; }
267 uint debug_start() const; // returns locoff of root caller
268 uint debug_end() const; // returns endoff of self
269 uint debug_size() const {
270 return loc_size() + sp() + mon_size() + scl_size();
271 }
272 uint debug_depth() const; // returns sum of debug_size values at all depths
273
274 // Returns the JVM state at the desired depth (1 == root).
275 JVMState* of_depth(int d) const;
276
277 // Tells if two JVM states have the same call chain (depth, methods, & bcis).
278 bool same_calls_as(const JVMState* that) const;
279
  // Monitors (monitors are stored as (boxNode, objNode) pairs)
281 enum { logMonitorEdges = 1 };
282 int nof_monitors() const { return mon_size() >> logMonitorEdges; }
283 int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
284 int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
285 int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
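  // (Since logMonitorEdges == 1, monitor idx occupies two adjacent inputs:
  // the box at monoff() + 2*idx and the object right after it.)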
286 bool is_monitor_box(uint off) const {
287 assert(is_mon(off), "should be called only for monitor edge");
288 return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
289 }
290 bool is_monitor_use(uint off) const { return (is_mon(off)
291 && is_monitor_box(off))
292 || (caller() && caller()->is_monitor_use(off)); }
293
294 // Initialization functions for the JVM
295 void set_locoff(uint off) { _locoff = off; }
296 void set_stkoff(uint off) { _stkoff = off; }
297 void set_monoff(uint off) { _monoff = off; }
298 void set_scloff(uint off) { _scloff = off; }
299 void set_endoff(uint off) { _endoff = off; }
300 void set_offsets(uint off) {
301 _locoff = _stkoff = _monoff = _scloff = _endoff = off;
302 }
303 void set_map(SafePointNode* map) { _map = map; }
304 void bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
305 void set_sp(uint sp) { _sp = sp; }
306 // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
308 void set_should_reexecute(bool reexec) {_reexecute = reexec ? Reexecute_True : Reexecute_False;}
309 void set_receiver_info(ciInstance* recv) { assert(has_method() || recv == nullptr, ""); _receiver_info = recv; }
310
311 // Miscellaneous utility functions
312 JVMState* clone_deep(Compile* C) const; // recursively clones caller chain
313 JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
314 void set_map_deep(SafePointNode *map);// reset map for all callers
315 void adapt_position(int delta); // Adapt offsets in in-array after adding an edge.
316 int interpreter_frame_size() const;
317 ciInstance* compute_receiver_info(ciMethod* callee) const;
318
319 #ifndef PRODUCT
320 void print_method_with_lineno(outputStream* st, bool show_name) const;
321 void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
322 void dump_spec(outputStream *st) const;
323 void dump_on(outputStream* st) const;
324 void dump() const {
325 dump_on(tty);
326 }
327 #endif
328 };
329
330 //------------------------------SafePointNode----------------------------------
331 // A SafePointNode is a subclass of a MultiNode for convenience (and
332 // potential code sharing) only - conceptually it is independent of
333 // the Node semantics.
334 class SafePointNode : public MultiNode {
335 friend JVMState;
336 friend class GraphKit;
337 friend class LibraryCallKit;
338
339 virtual bool cmp( const Node &n ) const;
340 virtual uint size_of() const; // Size is bigger
341
342 protected:
343 JVMState* const _jvms; // Pointer to list of JVM State objects
344 // Many calls take *all* of memory as input,
345 // but some produce a limited subset of that memory as output.
346 // The adr_type reports the call's behavior as a store, not a load.
347 const TypePtr* _adr_type; // What type of memory does this node produce?
348 ReplacedNodes _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map()
349 bool _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States
350
351 void set_jvms(JVMState* s) {
    assert(s != nullptr, "cannot assign null value to _jvms");
353 *(JVMState**)&_jvms = s; // override const attribute in the accessor
354 }
355 public:
356 SafePointNode(uint edges, JVMState* jvms,
357 // A plain safepoint advertises no memory effects (null):
358 const TypePtr* adr_type = nullptr)
359 : MultiNode( edges ),
360 _jvms(jvms),
361 _adr_type(adr_type),
362 _has_ea_local_in_scope(false)
363 {
364 init_class_id(Class_SafePoint);
365 }
366
367 JVMState* jvms() const { return _jvms; }
368 virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
369 void clone_jvms(Compile* C) {
370 if (jvms() != nullptr) {
371 if (needs_deep_clone_jvms(C)) {
372 set_jvms(jvms()->clone_deep(C));
373 jvms()->set_map_deep(this);
374 } else {
375 jvms()->clone_shallow(C)->bind_map(this);
376 }
377 }
378 }
379
380 private:
381 void verify_input(const JVMState* jvms, uint idx) const {
382 assert(verify_jvms(jvms), "jvms must match");
383 Node* n = in(idx);
384 assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
385 in(idx + 1)->is_top(), "2nd half of long/double");
386 }
387
388 public:
389 // Functionality from old debug nodes which has changed
390 Node* local(const JVMState* jvms, uint idx) const {
391 uint loc_idx = jvms->locoff() + idx;
392 assert(jvms->is_loc(loc_idx), "not a local slot");
393 verify_input(jvms, loc_idx);
394 return in(loc_idx);
395 }
396 Node* stack(const JVMState* jvms, uint idx) const {
397 uint stk_idx = jvms->stkoff() + idx;
398 assert(jvms->is_stk(stk_idx), "not a stack slot");
399 verify_input(jvms, stk_idx);
400 return in(stk_idx);
401 }
402 Node* argument(const JVMState* jvms, uint idx) const {
403 uint arg_idx = jvms->argoff() + idx;
404 assert(jvms->is_stk(arg_idx), "not an argument slot");
405 verify_input(jvms, arg_idx);
    return in(arg_idx);
407 }
408 Node* monitor_box(const JVMState* jvms, uint idx) const {
409 assert(verify_jvms(jvms), "jvms must match");
410 uint mon_box_idx = jvms->monitor_box_offset(idx);
411 assert(jvms->is_monitor_box(mon_box_idx), "not a monitor box offset");
412 return in(mon_box_idx);
413 }
414 Node* monitor_obj(const JVMState* jvms, uint idx) const {
415 assert(verify_jvms(jvms), "jvms must match");
416 uint mon_obj_idx = jvms->monitor_obj_offset(idx);
417 assert(jvms->is_mon(mon_obj_idx) && !jvms->is_monitor_box(mon_obj_idx), "not a monitor obj offset");
418 return in(mon_obj_idx);
419 }
420
421 void set_local(const JVMState* jvms, uint idx, Node *c);
422
423 void set_stack(const JVMState* jvms, uint idx, Node *c) {
424 assert(verify_jvms(jvms), "jvms must match");
425 set_req(jvms->stkoff() + idx, c);
426 }
427 void set_argument(const JVMState* jvms, uint idx, Node *c) {
428 assert(verify_jvms(jvms), "jvms must match");
429 set_req(jvms->argoff() + idx, c);
430 }
431 void ensure_stack(JVMState* jvms, uint stk_size) {
432 assert(verify_jvms(jvms), "jvms must match");
433 int grow_by = (int)stk_size - (int)jvms->stk_size();
434 if (grow_by > 0) grow_stack(jvms, grow_by);
435 }
436 void grow_stack(JVMState* jvms, uint grow_by);
437 // Handle monitor stack
438 void push_monitor( const FastLockNode *lock );
439 void pop_monitor ();
440 Node *peek_monitor_box() const;
441 Node *peek_monitor_obj() const;
  // Peek at the operand stack (JVMS 2.6.2)
443 Node* peek_operand(uint off = 0) const;
444
445 // Access functions for the JVM
446 Node *control () const { return in(TypeFunc::Control ); }
447 Node *i_o () const { return in(TypeFunc::I_O ); }
448 Node *memory () const { return in(TypeFunc::Memory ); }
449 Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
450 Node *frameptr () const { return in(TypeFunc::FramePtr ); }
451
452 void set_control ( Node *c ) { set_req(TypeFunc::Control,c); }
453 void set_i_o ( Node *c ) { set_req(TypeFunc::I_O ,c); }
454 void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }
455
456 MergeMemNode* merged_memory() const {
457 return in(TypeFunc::Memory)->as_MergeMem();
458 }
459
460 // The parser marks useless maps as dead when it's done with them:
461 bool is_killed() { return in(TypeFunc::Control) == nullptr; }
462
463 // Exception states bubbling out of subgraphs such as inlined calls
464 // are recorded here. (There might be more than one, hence the "next".)
465 // This feature is used only for safepoints which serve as "maps"
466 // for JVM states during parsing, intrinsic expansion, etc.
467 SafePointNode* next_exception() const;
468 void set_next_exception(SafePointNode* n);
469 bool has_exceptions() const { return next_exception() != nullptr; }
470
471 // Helper methods to operate on replaced nodes
472 ReplacedNodes replaced_nodes() const {
473 return _replaced_nodes;
474 }
475
476 void set_replaced_nodes(ReplacedNodes replaced_nodes) {
477 _replaced_nodes = replaced_nodes;
478 }
479
480 void clone_replaced_nodes() {
481 _replaced_nodes.clone();
482 }
483 void record_replaced_node(Node* initial, Node* improved) {
484 _replaced_nodes.record(initial, improved);
485 }
486 void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
487 _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
488 }
489 void delete_replaced_nodes() {
490 _replaced_nodes.reset();
491 }
492 void apply_replaced_nodes(uint idx) {
493 _replaced_nodes.apply(this, idx);
494 }
495 void merge_replaced_nodes_with(SafePointNode* sfpt) {
496 _replaced_nodes.merge_with(sfpt->_replaced_nodes);
497 }
498 bool has_replaced_nodes() const {
499 return !_replaced_nodes.is_empty();
500 }
501 void set_has_ea_local_in_scope(bool b) {
502 _has_ea_local_in_scope = b;
503 }
504 bool has_ea_local_in_scope() const {
505 return _has_ea_local_in_scope;
506 }
507
508 void disconnect_from_root(PhaseIterGVN *igvn);
509
510 // Standard Node stuff
511 virtual int Opcode() const;
512 virtual bool pinned() const { return true; }
513 virtual const Type* Value(PhaseGVN* phase) const;
514 virtual const Type* bottom_type() const { return Type::CONTROL; }
515 virtual const TypePtr* adr_type() const { return _adr_type; }
516 void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
517 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
518 virtual Node* Identity(PhaseGVN* phase);
519 virtual uint ideal_reg() const { return 0; }
520 virtual const RegMask &in_RegMask(uint) const;
521 virtual const RegMask &out_RegMask() const;
522 virtual uint match_edge(uint idx) const;
523
524 #ifndef PRODUCT
525 virtual void dump_spec(outputStream *st) const;
526 #endif
527 };
528
529 //------------------------------SafePointScalarObjectNode----------------------
530 // A SafePointScalarObjectNode represents the state of a scalarized object
531 // at a safepoint.
532 class SafePointScalarObjectNode: public TypeNode {
533 uint _first_index; // First input edge relative index of a SafePoint node where
534 // states of the scalarized object fields are collected.
535 uint _depth; // Depth of the JVM state the _first_index field refers to
536 uint _n_fields; // Number of non-static fields of the scalarized object.
537
538 Node* _alloc; // Just for debugging purposes.
539
540 virtual uint hash() const;
541 virtual bool cmp( const Node &n ) const;
542
543 uint first_index() const { return _first_index; }
544
545 public:
546 SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields);
547
548 virtual int Opcode() const;
549 virtual uint ideal_reg() const;
550 virtual const RegMask &in_RegMask(uint) const;
551 virtual const RegMask &out_RegMask() const;
552 virtual uint match_edge(uint idx) const;
553
554 uint first_index(JVMState* jvms) const {
555 assert(jvms != nullptr, "missed JVMS");
556 return jvms->of_depth(_depth)->scloff() + _first_index;
557 }
558 uint n_fields() const { return _n_fields; }
559
560 #ifdef ASSERT
561 Node* alloc() const { return _alloc; }
562 #endif
563
564 virtual uint size_of() const { return sizeof(*this); }
565
  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". Produce and return a
  // SafePointScalarObjectNode that corresponds appropriately to "this" in
  // "new_call". Assumes that "sosn_map" is a map, specific to the translation
  // of "s" to "new_call", mapping old SafePointScalarObjectNodes to new ones,
  // to avoid multiple copies.
573 SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
574
575 #ifndef PRODUCT
576 virtual void dump_spec(outputStream *st) const;
577 #endif
578 };
579
580 //------------------------------SafePointScalarMergeNode----------------------
581 //
582 // This class represents an allocation merge that is used as debug information
// and has had at least one of its inputs scalar replaced.
584 //
585 // The required inputs of this node, except the control, are pointers to
586 // SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge. The other properties of the class are described below.
588 //
589 // _merge_pointer_idx : index in the SafePointNode's input array where the
590 // description of the _allocation merge_ starts. The index is zero based and
591 // relative to the SafePoint's scloff. The two entries in the SafePointNode's
592 // input array starting at '_merge_pointer_idx` are Phi nodes representing:
593 //
594 // 1) The original merge Phi. During rematerialization this input will only be
595 // used if the "selector Phi" (see below) indicates that the execution of the
596 // Phi took the path of a non scalarized input.
597 //
598 // 2) A "selector Phi". The output of this Phi will be '-1' if the execution
599 // of the method exercised a non scalarized input of the original Phi.
// Otherwise, the output will be >= 0, and the selector value plus one gives
// the index in the SafePointScalarMergeNode's input array where the
// description of the scalarized object that should be used is found.
603 //
604 // As an example, consider a Phi merging 3 inputs, of which the last 2 are
605 // scalar replaceable.
606 //
607 // Phi(Region, NSR, SR, SR)
608 //
609 // During scalar replacement the SR inputs will be changed to null:
610 //
611 // Phi(Region, NSR, nullptr, nullptr)
612 //
613 // A corresponding selector Phi will be created with a configuration like this:
614 //
615 // Phi(Region, -1, 0, 1)
616 //
617 // During execution of the compiled method, if the execution reaches a Trap, the
618 // output of the selector Phi will tell if we need to rematerialize one of the
619 // scalar replaced inputs or if we should just use the pointer returned by the
620 // original Phi.
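//
// Continuing the example: if the selector Phi outputs '0' at the Trap, the
// scalarized object to rematerialize is described at input 0+1 == 1 of the
// SafePointScalarMergeNode; if it outputs '-1', the pointer produced by the
// original merge Phi is used directly.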
621
622 class SafePointScalarMergeNode: public TypeNode {
623 int _merge_pointer_idx; // This is the first input edge relative
624 // index of a SafePoint node where metadata information relative
625 // to restoring the merge is stored. The corresponding input
626 // in the associated SafePoint will point to a Phi representing
627 // potential non-scalar replaced objects.
628
629 virtual uint hash() const;
630 virtual bool cmp( const Node &n ) const;
631
632 public:
633 SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);
634
635 virtual int Opcode() const;
636 virtual uint ideal_reg() const;
637 virtual const RegMask &in_RegMask(uint) const;
638 virtual const RegMask &out_RegMask() const;
639 virtual uint match_edge(uint idx) const;
640
641 virtual uint size_of() const { return sizeof(*this); }
642
643 int merge_pointer_idx(JVMState* jvms) const {
644 assert(jvms != nullptr, "JVMS reference is null.");
645 return jvms->scloff() + _merge_pointer_idx;
646 }
647
648 int selector_idx(JVMState* jvms) const {
649 assert(jvms != nullptr, "JVMS reference is null.");
650 return jvms->scloff() + _merge_pointer_idx + 1;
651 }
652
  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". Produce and return a
  // SafePointScalarMergeNode that corresponds appropriately to "this" in
  // "new_call". Assumes that "sosn_map" is a map, specific to the translation
  // of "s" to "new_call", mapping old SafePointScalarMergeNodes to new ones,
  // to avoid multiple copies.
660 SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
661
662 #ifndef PRODUCT
663 virtual void dump_spec(outputStream *st) const;
664 #endif
665 };
666
667 // Simple container for the outgoing projections of a call. Useful
668 // for serious surgery on calls.
669 class CallProjections : public StackObj {
670 public:
671 Node* fallthrough_proj;
672 Node* fallthrough_catchproj;
673 Node* fallthrough_memproj;
674 Node* fallthrough_ioproj;
675 Node* catchall_catchproj;
676 Node* catchall_memproj;
677 Node* catchall_ioproj;
678 Node* resproj;
679 Node* exobj;
680 };
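
// A minimal usage sketch (assuming a CallNode* 'call' is at hand):
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   // ... rewire projs.fallthrough_memproj, projs.resproj, etc. ...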
681
682 class CallGenerator;
683
684 //------------------------------CallNode---------------------------------------
685 // Call nodes now subsume the function of debug nodes at callsites, so they
686 // contain the functionality of a full scope chain of debug nodes.
687 class CallNode : public SafePointNode {
688
689 protected:
690 bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
691
692 public:
693 const TypeFunc* _tf; // Function type
694 address _entry_point; // Address of method being called
695 float _cnt; // Estimate of number of times called
696 CallGenerator* _generator; // corresponding CallGenerator for some late inline calls
697 const char* _name; // Printable name, if _method is null
698
699 CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
700 : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
701 _tf(tf),
702 _entry_point(addr),
703 _cnt(COUNT_UNKNOWN),
704 _generator(nullptr),
705 _name(nullptr)
706 {
707 init_class_id(Class_Call);
708 }
709
710 const TypeFunc* tf() const { return _tf; }
711 address entry_point() const { return _entry_point; }
712 float cnt() const { return _cnt; }
713 CallGenerator* generator() const { return _generator; }
714
715 void set_tf(const TypeFunc* tf) { _tf = tf; }
716 void set_entry_point(address p) { _entry_point = p; }
717 void set_cnt(float c) { _cnt = c; }
718 void set_generator(CallGenerator* cg) { _generator = cg; }
719
720 virtual const Type* bottom_type() const;
721 virtual const Type* Value(PhaseGVN* phase) const;
722 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
723 virtual Node* Identity(PhaseGVN* phase) { return this; }
724 virtual bool cmp(const Node &n) const;
725 virtual uint size_of() const = 0;
726 virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
727 virtual Node* match(const ProjNode* proj, const Matcher* m);
728 virtual uint ideal_reg() const { return NotAMachineReg; }
729 // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
730 // for some macro nodes whose expansion does not have a safepoint on the fast path.
731 virtual bool guaranteed_safepoint() { return true; }
732 // For macro nodes, the JVMState gets modified during expansion. If calls
733 // use MachConstantBase, it gets modified during matching. So when cloning
734 // the node the JVMState must be deep cloned. Default is to shallow clone.
735 virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
736
737 // Returns true if the call may modify n
738 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
739 // Does this node have a use of n other than in debug information?
740 bool has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or null if there is none.
744 Node* result_cast();
  // Does this node return a pointer?
746 bool returns_pointer() const {
747 const TypeTuple* r = tf()->range();
748 return (r->cnt() > TypeFunc::Parms &&
749 r->field_at(TypeFunc::Parms)->isa_ptr());
750 }
751
752 // Collect all the interesting edges from a call for use in
753 // replacing the call by something else. Used by macro expansion
754 // and the late inlining support.
755 void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;
756
757 virtual uint match_edge(uint idx) const;
758
759 bool is_call_to_arraycopystub() const;
760
761 virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
762
763 #ifndef PRODUCT
764 virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
765 virtual void dump_spec(outputStream* st) const;
766 #endif
767 };
768
769
770 //------------------------------CallJavaNode-----------------------------------
771 // Make a static or dynamic subroutine call node using Java calling
772 // convention. (The "Java" calling convention is the compiler's calling
773 // convention, as opposed to the interpreter's or that of native C.)
774 class CallJavaNode : public CallNode {
775 protected:
776 virtual bool cmp( const Node &n ) const;
777 virtual uint size_of() const; // Size is bigger
778
779 ciMethod* _method; // Method being direct called
780 bool _optimized_virtual;
781 bool _override_symbolic_info; // Override symbolic call site info from bytecode
782 bool _arg_escape; // ArgEscape in parameter list
783 public:
784 CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method)
785 : CallNode(tf, addr, TypePtr::BOTTOM),
786 _method(method),
787 _optimized_virtual(false),
788 _override_symbolic_info(false),
789 _arg_escape(false)
790 {
791 init_class_id(Class_CallJava);
792 }
793
794 virtual int Opcode() const;
795 ciMethod* method() const { return _method; }
796 void set_method(ciMethod *m) { _method = m; }
797 void set_optimized_virtual(bool f) { _optimized_virtual = f; }
798 bool is_optimized_virtual() const { return _optimized_virtual; }
799 void set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
800 bool override_symbolic_info() const { return _override_symbolic_info; }
801 void set_arg_escape(bool f) { _arg_escape = f; }
802 bool arg_escape() const { return _arg_escape; }
803 void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
804 void register_for_late_inline();
805
806 DEBUG_ONLY( bool validate_symbolic_info() const; )
807
808 #ifndef PRODUCT
809 virtual void dump_spec(outputStream *st) const;
810 virtual void dump_compact_spec(outputStream *st) const;
811 #endif
812 };
813
814 //------------------------------CallStaticJavaNode-----------------------------
815 // Make a direct subroutine call using Java calling convention (for static
816 // calls and optimized virtual calls, plus calls to wrappers for run-time
817 // routines); generates static stub.
818 class CallStaticJavaNode : public CallJavaNode {
819 virtual bool cmp( const Node &n ) const;
820 virtual uint size_of() const; // Size is bigger
821 public:
822 CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
823 : CallJavaNode(tf, addr, method) {
824 init_class_id(Class_CallStaticJava);
825 if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
826 init_flags(Flag_is_macro);
827 C->add_macro_node(this);
828 }
829 }
830 CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
831 : CallJavaNode(tf, addr, nullptr) {
832 init_class_id(Class_CallStaticJava);
833 // This node calls a runtime stub, which often has narrow memory effects.
834 _adr_type = adr_type;
835 _name = name;
836 }
837
838 // If this is an uncommon trap, return the request code, else zero.
839 int uncommon_trap_request() const;
840 bool is_uncommon_trap() const;
841 static int extract_uncommon_trap_request(const Node* call);
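  // (The request code packs a Deoptimization::DeoptReason together with a
  // DeoptAction, as built by Deoptimization::make_trap_request().)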
842
843 bool is_boxing_method() const {
844 return is_macro() && (method() != nullptr) && method()->is_boxing_method();
845 }
846 // Late inlining modifies the JVMState, so we need to deep clone it
847 // when the call node is cloned (because it is macro node).
848 virtual bool needs_deep_clone_jvms(Compile* C) {
849 return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
850 }
851
852 virtual int Opcode() const;
853 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
854
855 #ifndef PRODUCT
856 virtual void dump_spec(outputStream *st) const;
857 virtual void dump_compact_spec(outputStream *st) const;
858 #endif
859 };
860
861 //------------------------------CallDynamicJavaNode----------------------------
862 // Make a dispatched call using Java calling convention.
863 class CallDynamicJavaNode : public CallJavaNode {
864 virtual bool cmp( const Node &n ) const;
865 virtual uint size_of() const; // Size is bigger
866 public:
867 CallDynamicJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int vtable_index)
868 : CallJavaNode(tf,addr,method), _vtable_index(vtable_index) {
869 init_class_id(Class_CallDynamicJava);
870 }
871
872 // Late inlining modifies the JVMState, so we need to deep clone it
873 // when the call node is cloned.
874 virtual bool needs_deep_clone_jvms(Compile* C) {
875 return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
876 }
877
878 int _vtable_index;
879 virtual int Opcode() const;
880 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
881 #ifndef PRODUCT
882 virtual void dump_spec(outputStream *st) const;
883 #endif
884 };
885
886 //------------------------------CallRuntimeNode--------------------------------
887 // Make a direct subroutine call node into compiled C++ code.
888 class CallRuntimeNode : public CallNode {
889 protected:
890 virtual bool cmp( const Node &n ) const;
891 virtual uint size_of() const; // Size is bigger
892 public:
893 CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
894 const TypePtr* adr_type, JVMState* jvms = nullptr)
895 : CallNode(tf, addr, adr_type, jvms)
896 {
897 init_class_id(Class_CallRuntime);
898 _name = name;
899 }
900
901 virtual int Opcode() const;
902 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
903
904 #ifndef PRODUCT
905 virtual void dump_spec(outputStream *st) const;
906 #endif
907 };
908
909 //------------------------------CallLeafNode-----------------------------------
910 // Make a direct subroutine call node into compiled C++ code, without
911 // safepoints
912 class CallLeafNode : public CallRuntimeNode {
913 public:
914 CallLeafNode(const TypeFunc* tf, address addr, const char* name,
915 const TypePtr* adr_type)
916 : CallRuntimeNode(tf, addr, name, adr_type)
917 {
918 init_class_id(Class_CallLeaf);
919 }
920 virtual int Opcode() const;
921 virtual bool guaranteed_safepoint() { return false; }
922 #ifndef PRODUCT
923 virtual void dump_spec(outputStream *st) const;
924 #endif
925 };
926
/* A pure function call: it is assumed not to be a safepoint, not to read or write
 * memory, and not to throw exceptions. It just takes parameters and returns a value,
 * without side effects. It is always correct to create such calls, or to remove them
 * if their result is not used.
 *
 * They still have a control input, to allow easy lowering into other kinds of calls
 * that require a control, but this is a technical rather than a fundamental constraint.
 *
 * Pure calls must have only control and data inputs and outputs: I/O, memory and so
 * on must be top. Nevertheless, pure calls can typically be expensive math operations,
 * so care must be taken when letting the node float.
 */
938 class CallLeafPureNode : public CallLeafNode {
939 protected:
940 bool is_unused() const;
941 bool is_dead() const;
942 TupleNode* make_tuple_of_input_state_and_top_return_values(const Compile* C) const;
943
944 public:
945 CallLeafPureNode(const TypeFunc* tf, address addr, const char* name,
946 const TypePtr* adr_type)
947 : CallLeafNode(tf, addr, name, adr_type) {
948 init_class_id(Class_CallLeafPure);
949 }
950 int Opcode() const override;
951 Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
952 };
953
954 //------------------------------CallLeafNoFPNode-------------------------------
// A CallLeafNode that either does not use floating point or uses it in the
// same manner as the generated code
957 class CallLeafNoFPNode : public CallLeafNode {
958 public:
959 CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
960 const TypePtr* adr_type)
961 : CallLeafNode(tf, addr, name, adr_type)
962 {
963 init_class_id(Class_CallLeafNoFP);
964 }
965 virtual int Opcode() const;
966 };
967
968 //------------------------------CallLeafVectorNode-------------------------------
969 // CallLeafNode but calling with vector calling convention instead.
970 class CallLeafVectorNode : public CallLeafNode {
971 private:
972 uint _num_bits;
973 protected:
974 virtual bool cmp( const Node &n ) const;
975 virtual uint size_of() const; // Size is bigger
976 public:
977 CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
978 const TypePtr* adr_type, uint num_bits)
979 : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
980 {
981 }
982 virtual int Opcode() const;
983 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
984 };
985
986
987 //------------------------------Allocate---------------------------------------
988 // High-level memory allocation
989 //
990 // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
991 // get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection). This is needed when expanding the node in
994 // order to differentiate the uses of the projection on the normal control path from
995 // those on the exception return path.
996 //
997 class AllocateNode : public CallNode {
998 public:
999 enum {
1000 // Output:
1001 RawAddress = TypeFunc::Parms, // the newly-allocated raw address
1002 // Inputs:
1003 AllocSize = TypeFunc::Parms, // size (in bytes) of the new object
1004 KlassNode, // type (maybe dynamic) of the obj.
1005 InitialTest, // slow-path test (may be constant)
1006 ALength, // array length (or TOP if none)
1007 ValidLengthTest,
1008 ParmLimit
1009 };
1010
1011 static const TypeFunc* alloc_type(const Type* t) {
1012 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1013 fields[AllocSize] = TypeInt::POS;
1014 fields[KlassNode] = TypeInstPtr::NOTNULL;
1015 fields[InitialTest] = TypeInt::BOOL;
1016 fields[ALength] = t; // length (can be a bad length)
1017 fields[ValidLengthTest] = TypeInt::BOOL;
1018
1019 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1020
1021 // create result type (range)
1022 fields = TypeTuple::fields(1);
1023 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1024
1025 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1026
1027 return TypeFunc::make(domain, range);
1028 }
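
  // (Instance allocations, which have no length, are expected to pass Type::TOP
  // for 't'; array allocations pass the length type, typically TypeInt::INT.)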
1029
1030 // Result of Escape Analysis
1031 bool _is_scalar_replaceable;
1032 bool _is_non_escaping;
  // True when the MemBar for new is redundant with the MemBar at the initializer exit
1034 bool _is_allocation_MemBar_redundant;
1035
1036 virtual uint size_of() const; // Size is bigger
1037 AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1038 Node *size, Node *klass_node, Node *initial_test);
1039 // Expansion modifies the JVMState, so we need to deep clone it
1040 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1041 virtual int Opcode() const;
1042 virtual uint ideal_reg() const { return Op_RegP; }
1043 virtual bool guaranteed_safepoint() { return false; }
1044
1045 // allocations do not modify their arguments
1046 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1047
1048 // Pattern-match a possible usage of AllocateNode.
1049 // Return null if no allocation is recognized.
1050 // The operand is the pointer produced by the (possible) allocation.
1051 // It must be a projection of the Allocate or its subsequent CastPP.
1052 // (Note: This function is defined in file graphKit.cpp, near
1053 // GraphKit::new_instance/new_array, whose output it recognizes.)
1054 // The 'ptr' may not have an offset unless the 'offset' argument is given.
1055 static AllocateNode* Ideal_allocation(Node* ptr);
1056
1057 // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1058 // an offset, which is reported back to the caller.
1059 // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
1060 static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
1061 intptr_t& offset);
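
  // A typical pattern-match, sketched:
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != nullptr) {
  //     // 'ptr' points 'offset' bytes into the recognized allocation
  //   }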
1062
1063 // Dig the klass operand out of a (possible) allocation site.
1064 static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
1065 AllocateNode* allo = Ideal_allocation(ptr);
1066 return (allo == nullptr) ? nullptr : allo->in(KlassNode);
1067 }
1068
1069 // Conservatively small estimate of offset of first non-header byte.
1070 int minimum_header_size() {
1071 return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
1072 instanceOopDesc::base_offset_in_bytes();
1073 }
1074
1075 // Return the corresponding initialization barrier (or null if none).
1076 // Walks out edges to find it...
1077 // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1078 // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1079 InitializeNode* initialization();
1080
1081 // Convenience for initialization->maybe_set_complete(phase)
1082 bool maybe_set_complete(PhaseGVN* phase);
1083
  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape is true
  // when its allocation's escape state is NoEscape or ArgEscape. In case the
  // allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape state
  // is NoEscape.
1091 bool does_not_escape_thread() {
1092 InitializeNode* init = nullptr;
1093 return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1094 }
1095
  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of its <init>, the memory barrier for new is
  // not necessary. Invoke this method when the MemBar at the exit of the
  // initializer post-dominates the allocation node.
1100 void compute_MemBar_redundancy(ciMethod* initializer);
1101 bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1102
1103 Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1104
1105 NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1106 };
1107
1108 //------------------------------AllocateArray---------------------------------
1109 //
1110 // High-level array allocation
1111 //
1112 class AllocateArrayNode : public AllocateNode {
1113 public:
1114 AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1115 Node* initial_test, Node* count_val, Node* valid_length_test)
1116 : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1117 initial_test)
1118 {
1119 init_class_id(Class_AllocateArray);
1120 set_req(AllocateNode::ALength, count_val);
1121 set_req(AllocateNode::ValidLengthTest, valid_length_test);
1122 }
1123 virtual int Opcode() const;
1124
  // Dig the length operand out of an array allocation site.
1126 Node* Ideal_length() {
1127 return in(AllocateNode::ALength);
1128 }
1129
  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
1132 Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1133
1134 // Pattern-match a possible usage of AllocateArrayNode.
1135 // Return null if no allocation is recognized.
1136 static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1137 AllocateNode* allo = Ideal_allocation(ptr);
1138 return (allo == nullptr || !allo->is_AllocateArray())
1139 ? nullptr : allo->as_AllocateArray();
1140 }
1141 };
1142
1143 //------------------------------AbstractLockNode-----------------------------------
1144 class AbstractLockNode: public CallNode {
1145 private:
1146 enum {
1147 Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
1149 Coarsened, // Lock was coarsened
1150 Nested // Nested lock
1151 } _kind;
1152
1153 static const char* _kind_names[Nested+1];
1154
1155 #ifndef PRODUCT
1156 NamedCounter* _counter;
1157 #endif
1158
1159 protected:
1160 // helper functions for lock elimination
1161 //
1162
1163 bool find_matching_unlock(const Node* ctrl, LockNode* lock,
1164 GrowableArray<AbstractLockNode*> &lock_ops);
1165 bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
1166 GrowableArray<AbstractLockNode*> &lock_ops);
1167 bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1168 GrowableArray<AbstractLockNode*> &lock_ops);
1169 LockNode *find_matching_lock(UnlockNode* unlock);
1170
1171 // Update the counter to indicate that this lock was eliminated.
1172 void set_eliminated_lock_counter() PRODUCT_RETURN;
1173
1174 public:
1175 AbstractLockNode(const TypeFunc *tf)
1176 : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
1177 _kind(Regular)
1178 {
1179 #ifndef PRODUCT
1180 _counter = nullptr;
1181 #endif
1182 }
1183 virtual int Opcode() const = 0;
  Node* obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node* box_node() const      { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }
1188
1189 const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
1190
1191 virtual uint size_of() const { return sizeof(*this); }
1192
1193 bool is_eliminated() const { return (_kind != Regular); }
1194 bool is_non_esc_obj() const { return (_kind == NonEscObj); }
1195 bool is_coarsened() const { return (_kind == Coarsened); }
1196 bool is_nested() const { return (_kind == Nested); }
1197
1198 const char * kind_as_string() const;
1199 void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const;
1200
1201 void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
1202 void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }
1203 void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }
1204
1205 // Check that all locks/unlocks associated with object come from balanced regions.
1206 // They can become unbalanced after coarsening optimization or on OSR entry.
1207 bool is_balanced();
1208
1209 // locking does not modify its arguments
1210 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase){ return false; }
1211
1212 #ifndef PRODUCT
1213 void create_lock_counter(JVMState* s);
1214 NamedCounter* counter() const { return _counter; }
1215 virtual void dump_spec(outputStream* st) const;
1216 virtual void dump_compact_spec(outputStream* st) const;
1217 #endif
1218 };
1219
1220 //------------------------------Lock---------------------------------------
1221 // High-level lock operation
1222 //
1223 // This is a subclass of CallNode because it is a macro node which gets expanded
1224 // into a code sequence containing a call. This node takes 3 "parameters":
1225 // 0 - object to lock
1226 // 1 - a BoxLockNode
1227 // 2 - a FastLockNode
1228 //
1229 class LockNode : public AbstractLockNode {
1230 static const TypeFunc* _lock_type_Type;
1231 public:
1232
1233 static inline const TypeFunc* lock_type() {
1234 assert(_lock_type_Type != nullptr, "should be initialized");
1235 return _lock_type_Type;
1236 }
1237
1238 static void initialize_lock_Type() {
1239 assert(_lock_type_Type == nullptr, "should be called once");
1240 // create input type (domain)
1241 const Type **fields = TypeTuple::fields(3);
1242 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
1243 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
1244 fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock
1245 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1246
1247 // create result type (range)
1248 fields = TypeTuple::fields(0);
1249
1250 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1251
1252 _lock_type_Type = TypeFunc::make(domain,range);
1253 }
1254
1255 virtual int Opcode() const;
1256 virtual uint size_of() const; // Size is bigger
1257 LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1258 init_class_id(Class_Lock);
1259 init_flags(Flag_is_macro);
1260 C->add_macro_node(this);
1261 }
1262 virtual bool guaranteed_safepoint() { return false; }
1263
1264 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1265 // Expansion modifies the JVMState, so we need to deep clone it
1266 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1267
1268 bool is_nested_lock_region(); // Is this Lock nested?
1269 bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1270 };
1271
1272 //------------------------------Unlock---------------------------------------
1273 // High-level unlock operation
1274 class UnlockNode : public AbstractLockNode {
1275 private:
1276 #ifdef ASSERT
1277 JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
1278 #endif
1279 public:
1280 virtual int Opcode() const;
1281 virtual uint size_of() const; // Size is bigger
1282 UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
1283 #ifdef ASSERT
1284 , _dbg_jvms(nullptr)
1285 #endif
1286 {
1287 init_class_id(Class_Unlock);
1288 init_flags(Flag_is_macro);
1289 C->add_macro_node(this);
1290 }
1291 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1292 // unlock is never a safepoint
1293 virtual bool guaranteed_safepoint() { return false; }
1294 #ifdef ASSERT
1295 void set_dbg_jvms(JVMState* s) {
1296 *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
1297 }
1298 JVMState* dbg_jvms() const { return _dbg_jvms; }
1299 #else
1300 JVMState* dbg_jvms() const { return nullptr; }
1301 #endif
1302 };
1303 #endif // SHARE_OPTO_CALLNODE_HPP