/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallLeafVectorNode;
class AllocateNode;
class AllocateArrayNode;
class AbstractLockNode;
class LockNode;
class UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode(uint edges, Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr);
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------ForwardExceptionNode---------------------------
// Pop stack frame and jump to StubRoutines::forward_exception_entry()
class ForwardExceptionNode : public ReturnNode {
public:
  ForwardExceptionNode(Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr)
    : ReturnNode(TypeFunc::Parms, cntrl, i_o, memory, frameptr, retadr) {
  }

  virtual int Opcode() const;
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*      _caller;    // List pointer for forming scope chains
  uint           _depth;     // One more than caller depth, or one.
  uint           _locoff;    // Offset to locals in input edge mapping
  uint           _stkoff;    // Offset to stack in input edge mapping
  uint           _monoff;    // Offset to monitors in input edge mapping
  uint           _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint           _endoff;    // Offset to end of input edge mapping
  uint           _sp;        // Java Expression Stack Pointer for this state
  int            _bci;       // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*      _method;    // Method Pointer
  ciInstance*    _receiver_info; // Constant receiver instance for compiled lambda forms
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
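  //
  // An illustrative layout (hypothetical numbers): with debug info starting
  // at TypeFunc::Parms == 5, a method with 3 locals, sp() == 2, a max stack
  // of 4, and one monitor (2 edges) has
  //   locoff() == 5, stkoff() == 8, argoff() == 8 + 2 == 10,
  //   monoff() == 12, scloff() == 14, endoff() == 14 (no scalarized objects).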
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return stkoff() - locoff(); }
  int stk_size() const { return monoff() - stkoff(); }
  int mon_size() const { return scloff() - monoff(); }
  int scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint sp() const { return _sp; }
  int bci() const { return _bci; }
  bool should_reexecute() const { return _reexecute == Reexecute_True; }
  bool is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool has_method() const { return _method != nullptr; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  ciInstance* receiver_info() const { assert(has_method(), ""); return _receiver_info; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint depth() const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end() const;   // returns endoff of self
  uint debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int nof_monitors() const { return mon_size() >> logMonitorEdges; }
  int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off) const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off) const { return (is_mon(off)
                                                && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
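  // For instance, with monoff() == 12 as in the sketch above, monitor 0
  // occupies input edges 12 (box) and 13 (object); in general monitor idx
  // uses edges monoff() + 2*idx and monoff() + 2*idx + 1.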

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode* map) { _map = map; }
  void bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }
  void set_receiver_info(ciInstance* recv) { assert(has_method() || recv == nullptr, ""); _receiver_info = recv; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void set_map_deep(SafePointNode* map);     // reset map for all callers
  void adapt_position(int delta);            // Adapt offsets in in-array after adding an edge.
  int interpreter_frame_size() const;
  ciInstance* compute_receiver_info(ciMethod* callee) const;

#ifndef PRODUCT
  void print_method_with_lineno(outputStream* st, bool show_name) const;
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};
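
// A minimal sketch (illustrative only) of walking an inlined scope chain,
// innermost to outermost, the way deoptimization and debug-info emission do;
// 'youngest' is a hypothetical JVMState*:
//
//   for (JVMState* jvms = youngest; jvms != nullptr; jvms = jvms->caller()) {
//     if (jvms->has_method()) {
//       // jvms->method() and jvms->bci() identify one interpreter frame
//     }
//   }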

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class LibraryCallKit;

  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

protected:
  JVMState* const _jvms; // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr* _adr_type; // What type of memory does this node produce?
  ReplacedNodes _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map()
  bool _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "assign null value to _jvms");
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (null):
                const TypePtr* adr_type = nullptr)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != nullptr) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

private:
  void verify_input(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node* local(const JVMState* jvms, uint idx) const {
    uint loc_idx = jvms->locoff() + idx;
    assert(jvms->is_loc(loc_idx), "not a local slot");
    verify_input(jvms, loc_idx);
    return in(loc_idx);
  }
  Node* stack(const JVMState* jvms, uint idx) const {
    uint stk_idx = jvms->stkoff() + idx;
    assert(jvms->is_stk(stk_idx), "not a stack slot");
    verify_input(jvms, stk_idx);
    return in(stk_idx);
  }
  Node* argument(const JVMState* jvms, uint idx) const {
    uint arg_idx = jvms->argoff() + idx;
    assert(jvms->is_stk(arg_idx), "not an argument slot");
    verify_input(jvms, arg_idx);
    return in(arg_idx);
  }
  Node* monitor_box(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    uint mon_box_idx = jvms->monitor_box_offset(idx);
    assert(jvms->is_monitor_box(mon_box_idx), "not a monitor box offset");
    return in(mon_box_idx);
  }
  Node* monitor_obj(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    uint mon_obj_idx = jvms->monitor_obj_offset(idx);
    assert(jvms->is_mon(mon_obj_idx) && !jvms->is_monitor_box(mon_obj_idx), "not a monitor obj offset");
    return in(mon_obj_idx);
  }

  void set_local(const JVMState* jvms, uint idx, Node *c);

  void set_stack(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
  // Peek Operand Stacks, JVMS 2.6.2
  Node* peek_operand(uint off = 0) const;

  // Access functions for the JVM
  Node *control () const { return in(TypeFunc::Control  ); }
  Node *i_o     () const { return in(TypeFunc::I_O      ); }
  Node *memory  () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control ( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o     ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory  ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == nullptr; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != nullptr; }
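  // Illustrative sketch of draining the list ('map' is a hypothetical
  // parsing map with recorded exception states):
  //
  //   for (SafePointNode* ex = map->next_exception(); ex != nullptr;
  //        ex = ex->next_exception()) {
  //     // ... merge or rethrow the exception state ...
  //   }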

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual const Type* bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
  virtual uint ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _depth;       // Depth of the JVM state the _first_index field refers to
  uint _n_fields;    // Number of non-static fields of the scalarized object.

  Node* _alloc;      // Just for debugging purposes.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields);

  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != nullptr, "missed JVMS");
    return jvms->of_depth(_depth)->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarMergeNode----------------------
//
// This class represents an allocation merge that is used as debug information
// and had at least one of its inputs scalar replaced.
//
// The required inputs of this node, except the control, are pointers to
// SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge.  The remaining properties of the class are described below.
//
// _merge_pointer_idx : index in the SafePointNode's input array where the
// description of the _allocation merge_ starts.  The index is zero based and
// relative to the SafePoint's scloff.  The two entries in the SafePointNode's
// input array starting at '_merge_pointer_idx' are Phi nodes representing:
//
// 1) The original merge Phi.  During rematerialization this input will only be
// used if the "selector Phi" (see below) indicates that the execution of the
// Phi took the path of a non scalarized input.
//
// 2) A "selector Phi".  The output of this Phi will be '-1' if the execution
// of the method exercised a non scalarized input of the original Phi.
// Otherwise, the output will be >= 0, and it will indicate the index - 1 in
// the SafePointScalarMergeNode input array where the description of the
// scalarized object to be used is found.
//
// As an example, consider a Phi merging 3 inputs, of which the last 2 are
// scalar replaceable.
//
//    Phi(Region, NSR, SR, SR)
//
// During scalar replacement the SR inputs will be changed to null:
//
//    Phi(Region, NSR, nullptr, nullptr)
//
// A corresponding selector Phi will be created with a configuration like this:
//
//    Phi(Region, -1, 0, 1)
//
// During execution of the compiled method, if the execution reaches a Trap, the
// output of the selector Phi will tell if we need to rematerialize one of the
// scalar replaced inputs or if we should just use the pointer returned by the
// original Phi.

class SafePointScalarMergeNode: public TypeNode {
  int _merge_pointer_idx; // This is the first input edge relative
                          // index of a SafePoint node where metadata information relative
                          // to restoring the merge is stored.  The corresponding input
                          // in the associated SafePoint will point to a Phi representing
                          // potential non-scalar replaced objects.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

public:
  SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);

  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  virtual uint size_of() const { return sizeof(*this); }

  int merge_pointer_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx;
  }

  int selector_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarMergeNodes to new, to avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
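
// Typical (illustrative) use together with CallNode::extract_projections():
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   // projs.fallthrough_catchproj, projs.catchall_catchproj, etc. now name
//   // the control/memory/io successors needed to rewire or remove the call.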

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf() const { return _tf; }
  address entry_point() const { return _entry_point; }
  float cnt() const { return _cnt; }
  CallGenerator* generator() const { return _generator; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c) { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion.  If calls
  // use MachConstantBase, it gets modified during matching.  If the call is
  // late inlined, it also needs the full JVMState.  So when cloning the
  // node the JVMState must be deep cloned.  Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }
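  // For example, a call returning an Object has a range whose slot at
  // TypeFunc::Parms holds an oop type, so cnt() == TypeFunc::Parms + 1 and
  // returns_pointer() is true; a void call has cnt() == TypeFunc::Parms.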

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;
  bool is_call_to_multianewarray_stub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};
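
// Illustrative sketch: an optimization pass locating the unique user-visible
// result of a call ('call' is a hypothetical CallNode*):
//
//   Node* res = call->result_cast(); // unique CheckCastPP, or result
//                                    // projection, or null (see above)
//   if (res != nullptr) { /* rewire the users of res */ }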

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  ciMethod* _method; // Method being direct called
  bool      _optimized_virtual;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  bool      _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method),
      _optimized_virtual(false),
      _override_symbolic_info(false),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const { return _method; }
  void set_method(ciMethod *m) { _method = m; }
  void set_optimized_virtual(bool f) { _optimized_virtual = f; }
  bool is_optimized_virtual() const { return _optimized_virtual; }
  void set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool override_symbolic_info() const { return _override_symbolic_info; }
  void set_arg_escape(bool f) { _arg_escape = f; }
  bool arg_escape() const { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
  void register_for_late_inline();

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);
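
  // Illustrative decoding of a trap request (sketch only; the Deoptimization
  // helpers live in runtime code):
  //
  //   if (call->is_uncommon_trap()) {
  //     int req = call->uncommon_trap_request();
  //     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
  //   }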

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf, addr, method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

/* A pure function call: assumed not to be a safepoint, not to read or write
 * memory, and to have no exceptions.  It just takes parameters and returns a
 * value, without side effects.  It is always correct to create new ones and,
 * if the result is not used, to remove them.
 *
 * They still have a control input to allow easy lowering into other kinds of
 * calls that require a control, but this is more a technical than a moral
 * constraint.
 *
 * Pure calls must have only control and data inputs and outputs: I/O, memory
 * and so on must be top.  Nevertheless, pure calls can typically be expensive
 * math operations, so care must be taken when letting the node float.
 */
class CallLeafPureNode : public CallLeafNode {
protected:
  bool is_unused() const;
  bool is_dead() const;
  TupleNode* make_tuple_of_input_state_and_top_return_values(const Compile* C) const;

public:
  CallLeafPureNode(const TypeFunc* tf, address addr, const char* name)
    : CallLeafNode(tf, addr, name, nullptr) {
    init_class_id(Class_CallLeafPure);
  }
  int Opcode() const override;
  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
};
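
// For instance (illustrative), a leaf stub computing a pure math function
// such as sin could be modeled as a CallLeafPureNode: two identical calls
// may be commoned by GVN, and a call whose result is unused may be removed
// by Ideal() without any observable effect.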

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call.  Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.)  This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ValidLengthTest,
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]       = TypeInt::POS;
    fields[KlassNode]       = TypeInstPtr::NOTNULL;
    fields[InitialTest]     = TypeInt::BOOL;
    fields[ALength]         = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
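
  // For example, alloc_type(TypeInt::INT) yields a domain whose parameter
  // slots are
  //   Parms+0: AllocSize       (TypeInt::POS)
  //   Parms+1: KlassNode       (TypeInstPtr::NOTNULL)
  //   Parms+2: InitialTest     (TypeInt::BOOL)
  //   Parms+3: ALength         (TypeInt::INT)
  //   Parms+4: ValidLengthTest (TypeInt::BOOL)
  // and a range returning a single TypeRawPtr::NOTNULL oop.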

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr) ? nullptr : allo->in(KlassNode);
  }
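
  // Illustrative use from an optimization pass ('ptr' and 'phase' are
  // hypothetical):
  //
  //   intptr_t offset;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != nullptr) {
  //     Node* klass = alloc->in(KlassNode);
  //     // ... reason about the allocation and its offset ...
  //   }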

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape.  InitializeNode._does_not_escape is true
  // when its allocation's escape state is NoEscape or ArgEscape.  In case the
  // allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape state
  // is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = nullptr;
    return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of its <init>, the memory barrier for the
  // new is not necessary.  Invoke this method when the MemBar is at the exit
  // of the initializer and post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);

  NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,         count_val);
    set_req(AllocateNode::ValidLengthTest, valid_length_test);
  }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr || !allo->is_AllocateArray())
           ? nullptr : allo->as_AllocateArray();
  }
};
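
// Illustrative pattern ('ptr', 'ary_type' and 'phase' are hypothetical):
// recover an array length from an allocation site, narrowing its type
// where possible:
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
//   if (alloc != nullptr) {
//     Node* len = alloc->make_ideal_length(ary_type, phase);
//   }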

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for non escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = nullptr;
#endif
  }
  virtual int Opcode() const = 0;
  Node* obj_node() const { return in(TypeFunc::Parms + 0); }
  Node* box_node() const { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated() const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened() const { return (_kind == Coarsened); }
  bool is_nested() const { return (_kind == Nested); }

  const char* kind_as_string() const;
  void log_lock_optimization(Compile* c, const char* tag, Node* bad_lock = nullptr) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // Check that all locks/unlocks associated with object come from balanced regions.
  // They can become unbalanced after coarsening optimization or on OSR entry.
  bool is_balanced();

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
  static const TypeFunc* _lock_type_Type;
public:

  static inline const TypeFunc* lock_type() {
    assert(_lock_type_Type != nullptr, "should be initialized");
    return _lock_type_Type;
  }

  static void initialize_lock_Type() {
    assert(_lock_type_Type == nullptr, "should be called once");
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    _lock_type_Type = TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(nullptr)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return nullptr; }
#endif
};

#endif // SHARE_OPTO_CALLNODE_HPP