/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MATCHER_HPP
#define SHARE_OPTO_MATCHER_HPP

#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/compressedOops.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/subnode.hpp"
#include "runtime/vm_version.hpp"

class Compile;
class Node;
class MachNode;
class MachTypeNode;
class MachOper;

//---------------------------Matcher-------------------------------------------
class Matcher : public PhaseTransform {
  friend class VMStructs;

public:

  // Machine-dependent definitions
#include CPU_HEADER(matcher)

  // Node_State and the MStack class are used by the xform() and
  // find_shared() iterative methods.
  enum Node_State { Pre_Visit,       // node has to be pre-visited
                    Visit,           // visit node
                    Post_Visit,      // post-visit node
                    Alt_Post_Visit   // alternative post-visit path
                  };

  class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    // Push a node together with its visit state.
    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    // Push a (parent, input-index) record beneath the (node, state) entry,
    // so that parent() can recover the parent once this node is popped.
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    // Pop the current entry and return the parent node recorded beneath it.
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
  };
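
  // Illustrative sketch (not part of the interface): the matcher's iterative
  // walks drive MStack as a manual DFS, pairing each node with a visit state
  // instead of using recursion:
  //
  //   MStack mstack(C->live_nodes() * 2);   // capacity is a guess here
  //   mstack.push(root, Pre_Visit);
  //   while (mstack.is_nonempty()) {
  //     Node* n = mstack.node();
  //     if (mstack.state() == Pre_Visit) {
  //       mstack.set_state(Post_Visit);     // revisit n after its children
  //       for (uint i = 0; i < n->req(); i++) {
  //         if (n->in(i) != nullptr) {
  //           mstack.push(n->in(i), Pre_Visit, n, i);  // remember parent edge
  //         }
  //       }
  //     } else {                            // Post_Visit
  //       mstack.pop();
  //     }
  //   }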

private:
  // Private arena of State objects
  ResourceArea _states_arena;

  // Map old nodes to new nodes
  Node_List _new_nodes;

  VectorSet _visited;    // Visit bits

  // Used to control the Label pass
  VectorSet _shared;     // Shared Ideal Node
  VectorSet _dontcare;   // Nothing the matcher cares about

  // Private methods which perform the actual matching and reduction
  // Walks the label tree, generating machine nodes
  MachNode *ReduceInst( State *s, int rule, Node *&mem);
  void ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach);
  uint ReduceInst_Interior(State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds);
  void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach );

  // If this node already matched using "rule", return the MachNode for it.
  MachNode* find_shared_node(Node* n, uint rule);

  // Convert a dense opcode number to an expanded rule number
  const int *_reduceOp;
  const int *_leftOp;
  const int *_rightOp;

  // Map dense opcode number to info on whether the rule swallowed a constant.
  const bool *_swallowed;

  // Map dense rule number to determine if this is an instruction chain rule
  const uint _begin_inst_chain_rule;
  const uint _end_inst_chain_rule;

  // We want to clone constants and possible CmpI-variants.
  // If we do not clone CmpI, then we can have many instances of
  // condition codes alive at once.  This is OK on some chips and
  // bad on others.  Hence the machine-dependent table lookup.
  const char *_must_clone;

  // Find shared Nodes, or Nodes that otherwise are Matcher roots
  void find_shared( Node *n );
  bool find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx);
  void find_shared_post_visit(Node* n, uint opcode);

  bool is_vshift_con_pattern(Node* n, Node* m);

  // Debug and profile information for nodes in old space:
  GrowableArray<Node_Notes*>* _old_node_note_array;

  // Node labeling iterator for instruction selection
  Node* Label_Root(const Node* n, State* svec, Node* control, Node*& mem);

  Node *transform( Node *dummy );

  Node_List _projection_list;   // For Machine nodes killing many values

  Node_Array _shared_nodes;

#ifndef PRODUCT
  Node_Array _old2new_map;      // Map roots of ideal-trees to machine-roots
  Node_Array _new2old_map;      // Maps machine nodes back to ideal
  VectorSet _reused;            // Ideal IGV identifiers reused by machine nodes
#endif // !PRODUCT

  void grow_new_node_array(uint idx_limit) {
    _new_nodes.map(idx_limit-1, nullptr);
  }
  bool has_new_node(const Node* n) const {
    return _new_nodes.at(n->_idx) != nullptr;
  }
  Node* new_node(const Node* n) const {
    assert(has_new_node(n), "set before get");
    return _new_nodes.at(n->_idx);
  }
  void set_new_node(const Node* n, Node *nn) {
    assert(!has_new_node(n), "set only once");
    _new_nodes.map(n->_idx, nn);
  }
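
  // Illustrative sketch (hypothetical call sequence, not actual matcher
  // code): the mapping is write-once per ideal node, so a caller checks
  // has_new_node() before recording a freshly matched MachNode:
  //
  //   if (!has_new_node(n)) {
  //     MachNode* m = match_tree(n);   // reduce the ideal subtree
  //     set_new_node(n, m);            // record the old->new mapping once
  //   }
  //   Node* nn = new_node(n);          // later lookups assert it is set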

#ifdef ASSERT
  // Make sure only new nodes are reachable from this node
  void verify_new_nodes_only(Node* root);

  Node* _mem_node;   // Ideal memory node consumed by mach node
#endif

  // Mach node for ConP #null
  MachNode* _mach_null;

  void handle_precedence_edges(Node* n, MachNode *mach);

public:
  int LabelRootDepth;
  // Convert ideal machine register to a register mask for spill-loads
  static const RegMask *idealreg2regmask[];
  RegMask *idealreg2spillmask  [_last_machine_leaf];
  RegMask *idealreg2debugmask  [_last_machine_leaf];
  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
  void init_spill_mask( Node *ret );
  // Convert machine register number to register mask
  static uint mreg2regmask_max;
  static RegMask mreg2regmask[];
  static RegMask STACK_ONLY_mask;
  static RegMask caller_save_regmask;
  static RegMask caller_save_regmask_exclude_soe;
  static RegMask mh_caller_save_regmask;
  static RegMask mh_caller_save_regmask_exclude_soe;

  MachNode* mach_null() const { return _mach_null; }

  bool is_shared( Node *n )    { return _shared.test(n->_idx) != 0; }
  void set_shared( Node *n )   { _shared.set(n->_idx); }
  bool is_visited( Node *n )   { return _visited.test(n->_idx) != 0; }
  void set_visited( Node *n )  { _visited.set(n->_idx); }
  bool is_dontcare( Node *n )  { return _dontcare.test(n->_idx) != 0; }
  void set_dontcare( Node *n ) { _dontcare.set(n->_idx); }
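
  // Illustrative sketch (simplified from the find_shared() logic): a node
  // reached along a second input edge is marked shared so it is matched as
  // its own tree root rather than folded into a consumer:
  //
  //   if (is_visited(m)) {
  //     set_shared(m);      // second sighting: give m its own register
  //   } else {
  //     set_visited(m);     // first sighting: may still be subsumed
  //   }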

  // Mode bit to tell DFA and expand rules whether we are running after
  // (or during) register selection.  Usually, the matcher runs before,
  // but it will also get called to generate post-allocation spill code.
  // In this situation, it is a deadly error to attempt to allocate more
  // temporary registers.
  bool _allocation_started;

  // Machine register names
  static const char *regName[];
  // Machine register encodings
  static const unsigned char _regEncode[];
  // Machine Node names
  const char **_ruleName;
  // Rules that are cheaper to rematerialize than to spill
  static const uint _begin_rematerialize;
  static const uint _end_rematerialize;

  // An array of chars, from 0 to _last_Mach_Reg.
  // No Save       = 'N' (for register windows)
  // Save on Entry = 'E'
  // Save on Call  = 'C'
  // Always Save   = 'A' (same as SOE + SOC)
  const char *_register_save_policy;
  const char *_c_reg_save_policy;
  // Convert a machine register to a machine register type, so as to
  // properly match spill code.
  const int *_register_save_type;
  // Maps from machine register to boolean; true if machine register can
  // be holding a call argument in some signature.
  static bool can_be_java_arg( int reg );
  // Maps from machine register to boolean; true if machine register holds
  // a spillable argument.
  static bool is_spillable_arg( int reg );
  // Number of integer live ranges that constitute high register pressure
  static uint int_pressure_limit();
  // Number of float live ranges that constitute high register pressure
  static uint float_pressure_limit();

  // List of IfFalse or IfTrue Nodes that indicate a taken null test.
  // List is valid in the post-matching space.
  Node_List _null_check_tests;
  void collect_null_checks( Node *proj, Node *orig_proj );
  void validate_null_checks( );

  Matcher();

  // Get a projection node at position pos
  Node* get_projection(uint pos) {
    return _projection_list[pos];
  }

  // Push a projection node onto the projection list
  void push_projection(Node* node) {
    _projection_list.push(node);
  }

  // Pop a projection node off the projection list
  Node* pop_projection() {
    return _projection_list.pop();
  }

  // Number of nodes in the projection list
  uint number_of_projections() const {
    return _projection_list.size();
  }
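
  // Illustrative sketch (assumed usage, simplified): projections of a
  // many-output mach node are buffered here during matching and drained
  // afterwards in LIFO order:
  //
  //   push_projection(proj);               // while matching the node
  //   ...
  //   while (number_of_projections() > 0) {
  //     Node* proj = pop_projection();     // re-attach in the new graph
  //     ...
  //   }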

  // Select instructions for entire method
  void match();

  // Helper for match
  OptoReg::Name warp_incoming_stk_arg( VMReg reg );

  // Transform, then walk.  Does implicit DCE while walking.
  // Name changed from "transform" to avoid it being virtual.
  Node *xform( Node *old_space_node, int Nodes );

  // Match a single Ideal Node - turn it into a 1-Node tree; Label & Reduce.
  MachNode *match_tree( const Node *n );
  MachNode *match_sfpt( SafePointNode *sfpt );
  // Helper for match_sfpt
  OptoReg::Name warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call );

  // Initialize first stack mask and related masks.
  void init_first_stack_mask();

  // If we should save-on-entry this register
  bool is_save_on_entry( int reg );

  // Fixup the save-on-entry registers
  void Fixup_Save_On_Entry( );

  // --- Frame handling ---

  // Register number of the stack slot corresponding to the incoming SP.
  // Per the Big Picture in the AD file, it is:
  //   SharedInfo::stack0 + locks + in_preserve_stack_slots + pad2.
  OptoReg::Name _old_SP;

  // Register number of the stack slot corresponding to the highest incoming
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  OptoReg::Name _in_arg_limit;

  // Register number of the stack slot corresponding to the new SP.
  // Per the Big Picture in the AD file, it is:
  //   _in_arg_limit + pad0
  OptoReg::Name _new_SP;

  // Register number of the stack slot corresponding to the highest outgoing
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _new_SP + max outgoing arguments of all calls
  OptoReg::Name _out_arg_limit;

  OptoRegPair *_parm_regs;            // Array of machine registers per argument
  RegMask *_calling_convention_mask;  // Array of RegMasks per argument

  // Does matcher have a match rule for this ideal node?
  static bool has_match_rule(int opcode);
  static const bool _hasMatchRule[_last_opcode];

  // Does matcher have a match rule for this ideal node and is the
  // predicate (if there is one) true?
  // NOTE: If this function is used more commonly in the future, ADLC
  // should generate this one.
  static bool match_rule_supported(int opcode);

  // Identify extra cases that we might want to vectorize automatically,
  // and exclude cases which are not profitable to auto-vectorize.
  static bool match_rule_supported_auto_vectorization(int opcode, int vlen, BasicType bt);

  // Identify extra cases that we might want to provide match rules for,
  // e.g. Op_ vector nodes and other intrinsics, while guarding with vlen.
  static bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);

  static bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);

  static bool vector_needs_partial_operations(Node* node, const TypeVect* vt);

  static const RegMask* predicate_reg_mask(void);
  static const TypeVectMask* predicate_reg_type(const Type* elemTy, int length);

  // Vector width in bytes
  static int vector_width_in_bytes(BasicType bt);

  // Limits on vector size (number of elements).
  static int max_vector_size(const BasicType bt);
  static int min_vector_size(const BasicType bt);
  static bool vector_size_supported(const BasicType bt, int size) {
    return (Matcher::max_vector_size(bt) >= size &&
            Matcher::min_vector_size(bt) <= size);
  }
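
  // Example (values are platform-dependent and shown only for illustration):
  // on a machine where max_vector_size(T_INT) == 16 and
  // min_vector_size(T_INT) == 2, vector_size_supported(T_INT, 8) is true
  // and vector_size_supported(T_INT, 32) is false.
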
  // Limits on max vector size (number of elements) for auto-vectorization.
  static int max_vector_size_auto_vectorization(const BasicType bt);

  // Actual max scalable vector register length.
  static int scalable_vector_reg_size(const BasicType bt);
  // Actual max scalable predicate register length.
  static int scalable_predicate_reg_slots();

  // Vector ideal reg
  static uint vector_ideal_reg(int len);

  // Vector length
  static uint vector_length(const Node* n);
  static uint vector_length(const MachNode* use, const MachOper* opnd);

  // Vector length in bytes
  static uint vector_length_in_bytes(const Node* n);
  static uint vector_length_in_bytes(const MachNode* use, const MachOper* opnd);

  // Vector element basic type
  static BasicType vector_element_basic_type(const Node* n);
  static BasicType vector_element_basic_type(const MachNode* use, const MachOper* opnd);

  // Whether the vector element basic type is an integral type narrower than
  // a double word (i.e., any integral type except long).
  static bool is_non_long_integral_vector(const Node* n);

  // Check if the given booltest condition is unsigned or not
  static inline bool is_unsigned_booltest_pred(int bt) {
    return ((bt & BoolTest::unsigned_compare) == BoolTest::unsigned_compare);
  }
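
  // Example: the unsigned comparison codes in BoolTest (ult, ule, ugt, uge)
  // carry the BoolTest::unsigned_compare bit, so
  // is_unsigned_booltest_pred(BoolTest::ult) is true while
  // is_unsigned_booltest_pred(BoolTest::lt) is false.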

  // These calls are all generated by the ADLC

  // Java-Java calling convention
  // (what you use when Java calls Java)

  // Alignment of stack in bytes: standard Intel word alignment is 4, and
  // SPARC probably wants at least double-word (8).
  static uint stack_alignment_in_bytes();
  // Alignment of stack, measured in stack slots.
  // The size of stack slots is defined by VMRegImpl::stack_slot_size.
  static uint stack_alignment_in_slots() {
    return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
  }
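
  // Worked example (platform values assumed for illustration): with 16-byte
  // stack alignment and a VMRegImpl::stack_slot_size of 4,
  // stack_alignment_in_slots() returns 16 / 4 == 4.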

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static OptoReg::Name find_receiver();
  // Return address register.  On Intel it is a stack-slot.  On PowerPC
  // it is the Link register.  On SPARC it is r31?
  virtual OptoReg::Name return_addr() const;
  RegMask _return_addr_mask;
  // Return value register.  On Intel it is EAX.
  static OptoRegPair return_value(uint ideal_reg);
  static OptoRegPair c_return_value(uint ideal_reg);
  RegMask _return_value_mask;
  // Inline Cache Register
  static OptoReg::Name inline_cache_reg();
  static int inline_cache_reg_encode();

  // Register for DIVI projection of divmodI
  static RegMask divI_proj_mask();
  // Register for MODI projection of divmodI
  static RegMask modI_proj_mask();

  // Register for DIVL projection of divmodL
  static RegMask divL_proj_mask();
  // Register for MODL projection of divmodL
  static RegMask modL_proj_mask();

  // Use the hardware DIV instruction when it is faster than code that uses
  // multiply for division by a constant.
  static bool use_asm_for_ldiv_by_con( jlong divisor );
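
  // Background note: division by a constant is normally strength-reduced to
  // a multiply-high plus shift sequence (the classic "magic number"
  // transformation); this hook lets a platform report divisors for which the
  // hardware divide is faster anyway.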

  static const RegMask method_handle_invoke_SP_save_mask();

  // Java-Interpreter calling convention
  // (what you use when calling between compiled-Java and Interpreted-Java)

  // Number of callee-save + always-save registers
  // Ignores frame pointer and "special" registers
  static int number_of_saved_registers();

  // The Method-klass-holder may be passed in the inline_cache_reg
  // and then expanded into the inline_cache_reg and a method_ptr register

  // Interpreter's Frame Pointer Register
  static OptoReg::Name interpreter_frame_pointer_reg();

  // Java-Native calling convention
  // (what you use when intercalling between Java and C++ code)

  // Frame pointer.  The frame pointer is kept at the base of the stack
  // and so is probably the stack pointer for most machines.  On Intel
  // it is ESP.  On the PowerPC it is R1.  On SPARC it is SP.
  OptoReg::Name c_frame_pointer() const;
  static RegMask c_frame_ptr_mask;

  // Java-Native vector calling convention
  static bool supports_vector_calling_convention();
  static OptoRegPair vector_return_value(uint ideal_reg);

  // Is this branch offset small enough to be addressed by a short branch?
  bool is_short_branch_offset(int rule, int br_size, int offset);

  // Should the input 'm' of node 'n' be cloned during matching?
  // Reports back whether the node was cloned or not.
  bool clone_node(Node* n, Node* m, Matcher::MStack& mstack);
  bool pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack);

  // Should the Matcher clone shifts on addressing modes, expecting them to
  // be subsumed into complex addressing expressions or compute them into
  // registers?  True for Intel but false for most RISCs.
  bool pd_clone_address_expressions(AddPNode* m, MStack& mstack, VectorSet& address_visited);
  // Clone base + offset address expression
  bool clone_base_plus_offset_address(AddPNode* m, MStack& mstack, VectorSet& address_visited);
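
  // Example (x86, for illustration): the address expression
  //   base + (index << 2) + 12
  // can be subsumed into a single addressing mode [base + index*4 + 12], so
  // the shift is cloned into each memory user instead of being computed once
  // into a register and shared.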

  // Generate an implicit null check for narrow oops if it can fold
  // into the address expression (x64).
  //
  //   [R12 + narrow_oop_reg<<3 + offset]  // fold into address expression
  //   NullCheck narrow_oop_reg
  //
  // When narrow oops can't fold into the address expression (SPARC) and the
  // base is not null, use decode_not_null and a normal implicit null check.
  // Note, the decode_not_null node can be used here since it is referenced
  // only on the non-null path, but it requires special handling; see
  // collect_null_checks():
  //
  //   decode_not_null narrow_oop_reg, oop_reg  // 'shift' and 'add base'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  // With a zero base, when narrow oops cannot fold into the address
  // expression, use a normal implicit null check since only a shift is
  // needed to decode the narrow oop.
  //
  //   decode narrow_oop_reg, oop_reg  // only 'shift'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  static bool gen_narrow_oop_implicit_null_checks();

private:
  void do_postselect_cleanup();

  void specialize_generic_vector_operands();
  void specialize_mach_node(MachNode* m);
  void specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx);
  MachOper* specialize_vector_operand(MachNode* m, uint opnd_idx);

  static MachOper* pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp);
  static bool is_reg2reg_move(MachNode* m);
  static bool is_generic_vector(MachOper* opnd);

  const RegMask* regmask_for_ideal_register(uint ideal_reg, Node* ret);

  // Graph verification code
  DEBUG_ONLY( bool verify_after_postselect_cleanup(); )

public:
  // This routine is run whenever a graph fails to match.
  // If it returns, the compiler should bail out to the interpreter without
  // error.  In non-product mode, SoftMatchFailure is false to detect
  // non-canonical graphs: print a message and exit.
  static void soft_match_failure() {
    if( SoftMatchFailure ) return;
    else { fatal("SoftMatchFailure is not allowed except in product"); }
  }

  // Check for a following volatile memory barrier with no intervening load;
  // in that case we don't need a barrier here.  We retain the Node to act
  // as a compiler ordering barrier.
  static bool post_store_load_barrier(const Node* mb);

  // Does n lead to an uncommon trap that can cause deoptimization?
  static bool branches_to_uncommon_trap(const Node *n);

#ifndef PRODUCT
  // Record mach-to-Ideal mapping, reusing the Ideal IGV identifier if possible.
  void record_new2old(Node* newn, Node* old);

  void dump_old2new_map();  // machine-independent to machine-dependent

  Node* find_old_node(const Node* new_node) {
    return _new2old_map[new_node->_idx];
  }
#endif // !PRODUCT
};

#endif // SHARE_OPTO_MATCHER_HPP
--- EOF ---