/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MATCHER_HPP
#define SHARE_OPTO_MATCHER_HPP

#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/compressedOops.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/subnode.hpp"
#include "runtime/vm_version.hpp"

class Compile;
class Node;
class MachNode;
class MachTypeNode;
class MachOper;

//---------------------------Matcher-------------------------------------------
class Matcher : public PhaseTransform {
  friend class VMStructs;

public:

  // Machine-dependent definitions
#include CPU_HEADER(matcher)

  // State and MStack class used in xform() and find_shared() iterative methods.
  enum Node_State { Pre_Visit,      // node has to be pre-visited
                    Visit,          // visit node
                    Post_Visit,     // post-visit node
                    Alt_Post_Visit  // alternative post-visit path
  };

  class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      Node_Stack::push(parent, (uint)indx);
      Node_Stack::push(n, (uint)ns);
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
  };

private:
  // Private arena of State objects
  ResourceArea _states_arena;

  // Map old nodes to new nodes
  Node_List _new_nodes;

  VectorSet _visited;   // Visit bits

  // Used to control the Label pass
  VectorSet _shared;    // Shared Ideal Node
  VectorSet _dontcare;  // Nothing the matcher cares about

  // Private methods which perform the actual matching and reduction
  // Walks the label tree, generating machine nodes
  MachNode *ReduceInst( State *s, int rule, Node *&mem);
  void ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach);
  uint ReduceInst_Interior(State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds);
  void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach );
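
  // Rough shape of a reduction, for orientation only (the authoritative
  // control flow is in match_tree() in matcher.cpp and in the ADLC-generated
  // DFA; 'rule' stands for the cheapest rule picked from the labelled State):
  //
  //   State* s   = ...;                         // labelled by Label_Root()
  //   Node*  mem = ...;                         // memory threaded through the walk
  //   MachNode* m = ReduceInst(s, rule, mem);   // emits the machine-node tree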

  // If this node already matched using "rule", return the MachNode for it.
  MachNode* find_shared_node(Node* n, uint rule);

  // Convert a dense opcode number to an expanded rule number
  const int *_reduceOp;
  const int *_leftOp;
  const int *_rightOp;

  // Map dense opcode number to info on when rule is swallowed constant.
  const bool *_swallowed;

  // Map dense rule number to determine if this is an instruction chain rule
  const uint _begin_inst_chain_rule;
  const uint _end_inst_chain_rule;

  // We want to clone constants and possible CmpI-variants.
  // If we do not clone CmpI, then we can have many instances of
  // condition codes alive at once. This is OK on some chips and
  // bad on others. Hence the machine-dependent table lookup.
  const char *_must_clone;

  // Find shared Nodes, or Nodes that otherwise are Matcher roots
  void find_shared( Node *n );
  bool find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx);
  void find_shared_post_visit(Node* n, uint opcode);

  bool is_vshift_con_pattern(Node* n, Node* m);

  // Debug and profile information for nodes in old space:
  GrowableArray<Node_Notes*>* _old_node_note_array;

  // Node labeling iterator for instruction selection
  Node* Label_Root(const Node* n, State* svec, Node* control, Node*& mem);

  Node *transform( Node *dummy );

  Node_List _projection_list;  // For Machine nodes killing many values

  Node_Array _shared_nodes;

#ifndef PRODUCT
  Node_Array _old2new_map;  // Map roots of ideal-trees to machine-roots
  Node_Array _new2old_map;  // Maps machine nodes back to ideal
  VectorSet _reused;        // Ideal IGV identifiers reused by machine nodes
#endif // !PRODUCT

  void grow_new_node_array(uint idx_limit) {
    _new_nodes.map(idx_limit-1, nullptr);
  }
  bool has_new_node(const Node* n) const {
    return _new_nodes.at(n->_idx) != nullptr;
  }
  Node* new_node(const Node* n) const {
    assert(has_new_node(n), "set before get");
    return _new_nodes.at(n->_idx);
  }
  void set_new_node(const Node* n, Node *nn) {
    assert(!has_new_node(n), "set only once");
    _new_nodes.map(n->_idx, nn);
  }

#ifdef ASSERT
  // Make sure only new nodes are reachable from this node
  void verify_new_nodes_only(Node* root);

  Node* _mem_node;  // Ideal memory node consumed by mach node
#endif

  // Mach node for ConP #null
  MachNode* _mach_null;

  void handle_precedence_edges(Node* n, MachNode *mach);

public:
  int LabelRootDepth;
  // Convert ideal machine register to a register mask for spill-loads
  static const RegMask *idealreg2regmask[];
  RegMask *idealreg2spillmask  [_last_machine_leaf];
  RegMask *idealreg2debugmask  [_last_machine_leaf];
  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
  void init_spill_mask( Node *ret );
  // Convert machine register number to register mask
  static uint mreg2regmask_max;
  static RegMask mreg2regmask[];
  static RegMask STACK_ONLY_mask;
  static RegMask caller_save_regmask;
  static RegMask caller_save_regmask_exclude_soe;
  static RegMask mh_caller_save_regmask;
  static RegMask mh_caller_save_regmask_exclude_soe;
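
  // Illustrative use of these tables (details are approximate; the masks are
  // built by init_spill_mask() and init_first_stack_mask() in matcher.cpp):
  //
  //   const RegMask* spill = idealreg2spillmask[Op_RegI];  // int registers + spill slots
  //   const RegMask& one   = mreg2regmask[reg];            // mask holding just 'reg'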

  MachNode* mach_null() const { return _mach_null; }

  bool is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }
  void set_shared( Node *n ) { _shared.set(n->_idx); }
  bool is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }
  void set_visited( Node *n ) { _visited.set(n->_idx); }
  bool is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; }
  void set_dontcare( Node *n ) { _dontcare.set(n->_idx); }

  // Mode bit to tell DFA and expand rules whether we are running after
  // (or during) register selection. Usually, the matcher runs before,
  // but it will also get called to generate post-allocation spill code.
  // In this situation, it is a deadly error to attempt to allocate more
  // temporary registers.
  bool _allocation_started;

  // Machine register names
  static const char *regName[];
  // Machine register encodings
  static const unsigned char _regEncode[];
  // Machine Node names
  const char **_ruleName;
  // Rules that are cheaper to rematerialize than to spill
  static const uint _begin_rematerialize;
  static const uint _end_rematerialize;

  // An array of chars, from 0 to _last_Mach_Reg.
  // No Save       = 'N' (for register windows)
  // Save on Entry = 'E'
  // Save on Call  = 'C'
  // Always Save   = 'A' (same as SOE + SOC)
  const char *_register_save_policy;
  const char *_c_reg_save_policy;
  // Convert a machine register to a machine register type, so as to
  // properly match spill code.
  const int *_register_save_type;
  // Maps from machine register to boolean; true if machine register can
  // hold a call argument in some signature.
  static bool can_be_java_arg( int reg );
  // Maps from machine register to boolean; true if machine register holds
  // a spillable argument.
  static bool is_spillable_arg( int reg );
  // Number of integer live ranges that constitute high register pressure
  static uint int_pressure_limit();
  // Number of float live ranges that constitute high register pressure
  static uint float_pressure_limit();

  // List of IfFalse or IfTrue Nodes that indicate a taken null test.
  // List is valid in the post-matching space.
  Node_List _null_check_tests;
  void collect_null_checks( Node *proj, Node *orig_proj );
  void validate_null_checks( );

  Matcher();

  // Get a projection node at position pos
  Node* get_projection(uint pos) {
    return _projection_list[pos];
  }

  // Push a projection node onto the projection list
  void push_projection(Node* node) {
    _projection_list.push(node);
  }

  Node* pop_projection() {
    return _projection_list.pop();
  }

  // Number of nodes in the projection list
  uint number_of_projections() const {
    return _projection_list.size();
  }

  // Select instructions for entire method
  void match();

  // Helper for match
  OptoReg::Name warp_incoming_stk_arg( VMReg reg );

  // Transform, then walk. Does implicit DCE while walking.
  // Name changed from "transform" to avoid it being virtual.
  Node *xform( Node *old_space_node, int Nodes );

  // Match a single Ideal Node - turn it into a 1-Node tree; Label & Reduce.
  MachNode *match_tree( const Node *n );
  MachNode *match_sfpt( SafePointNode *sfpt );
  // Helper for match_sfpt
  OptoReg::Name warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call );
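
  // For orientation (illustrative; see Compile::Code_Gen() for the real call
  // site): match() above is the entry point and runs once per compilation,
  // roughly:
  //
  //   Matcher matcher;
  //   matcher.match();  // replaces the ideal graph with a machine-node graph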

  // Initialize first stack mask and related masks.
  void init_first_stack_mask();

  // Whether this register should be saved on entry
  bool is_save_on_entry( int reg );

  // Fixup the save-on-entry registers
  void Fixup_Save_On_Entry( );

  // --- Frame handling ---

  // Register number of the stack slot corresponding to the incoming SP.
  // Per the Big Picture in the AD file, it is:
  //   SharedInfo::stack0 + locks + in_preserve_stack_slots + pad2.
  OptoReg::Name _old_SP;

  // Register number of the stack slot corresponding to the highest incoming
  // argument on the stack. Per the Big Picture in the AD file, it is:
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  OptoReg::Name _in_arg_limit;

  // Register number of the stack slot corresponding to the new SP.
  // Per the Big Picture in the AD file, it is:
  //   _in_arg_limit + pad0
  OptoReg::Name _new_SP;

  // Register number of the stack slot corresponding to the highest outgoing
  // argument on the stack. Per the Big Picture in the AD file, it is:
  //   _new_SP + max outgoing arguments of all calls
  OptoReg::Name _out_arg_limit;

  OptoRegPair *_parm_regs;            // Array of machine registers per argument
  RegMask *_calling_convention_mask;  // Array of RegMasks per argument

  // Does matcher have a match rule for this ideal node?
  static bool has_match_rule(int opcode);
  static const bool _hasMatchRule[_last_opcode];

  // Does matcher have a match rule for this ideal node and is the
  // predicate (if there is one) true?
  // NOTE: If this function is used more commonly in the future, ADLC
  // should generate this one.
  static bool match_rule_supported(int opcode);

  // Identify extra cases that we might want to vectorize automatically,
  // and exclude cases which are not profitable to auto-vectorize.
  static bool match_rule_supported_auto_vectorization(int opcode, int vlen, BasicType bt);

  // Identify extra cases that we might want to provide match rules for,
  // e.g. Op_ vector nodes and other intrinsics, while guarding with vlen
  static bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);

  static bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);

  static bool vector_needs_partial_operations(Node* node, const TypeVect* vt);

  static bool vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen);

  static const RegMask* predicate_reg_mask(void);

  // Vector width in bytes
  static int vector_width_in_bytes(BasicType bt);

  // Limits on vector size (number of elements).
  static int max_vector_size(const BasicType bt);
  static int min_vector_size(const BasicType bt);
  static bool vector_size_supported(const BasicType bt, int size) {
    return (Matcher::max_vector_size(bt) >= size &&
            Matcher::min_vector_size(bt) <= size);
  }
  // Limits on max vector size (number of elements) for auto-vectorization.
  static int max_vector_size_auto_vectorization(const BasicType bt);
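
  // Example (illustrative): with a 32-byte vector width (e.g. AVX2),
  // max_vector_size(T_INT) is 32 / sizeof(jint) == 8, so
  // vector_size_supported(T_INT, 8) holds while
  // vector_size_supported(T_INT, 16) does not.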

  // Actual max scalable vector register length.
  static int scalable_vector_reg_size(const BasicType bt);
  // Actual max scalable predicate register length.
  static int scalable_predicate_reg_slots();

  // Vector ideal reg
  static uint vector_ideal_reg(int len);

  // Vector length
  static uint vector_length(const Node* n);
  static uint vector_length(const MachNode* use, const MachOper* opnd);

  // Vector length in bytes
  static uint vector_length_in_bytes(const Node* n);
  static uint vector_length_in_bytes(const MachNode* use, const MachOper* opnd);

  // Vector element basic type
  static BasicType vector_element_basic_type(const Node* n);
  static BasicType vector_element_basic_type(const MachNode* use, const MachOper* opnd);

  // True if the vector element basic type is an integral type other than long
  // (i.e. not a double-word type).
  static bool is_non_long_integral_vector(const Node* n);

  // Check whether the given booltest condition is unsigned
  static inline bool is_unsigned_booltest_pred(int bt) {
    return ((bt & BoolTest::unsigned_compare) == BoolTest::unsigned_compare);
  }

  static bool is_encode_and_store_pattern(const Node* n, const Node* m);

  // These calls are all generated by the ADLC

  // Java-Java calling convention
  // (what you use when Java calls Java)

  // Alignment of stack in bytes; standard Intel word alignment is 4.
  // Sparc probably wants at least double-word (8).
  static uint stack_alignment_in_bytes();
  // Alignment of stack, measured in stack slots.
  // The size of stack slots is defined by VMRegImpl::stack_slot_size.
  static uint stack_alignment_in_slots() {
    return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
  }

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static OptoReg::Name find_receiver();
  // Return address register. On Intel it is a stack-slot. On PowerPC
  // it is the Link register. On Sparc it is r31?
  virtual OptoReg::Name return_addr() const;
  RegMask _return_addr_mask;
  // Return value register. On Intel it is EAX.
  static OptoRegPair return_value(uint ideal_reg);
  static OptoRegPair c_return_value(uint ideal_reg);
  RegMask _return_value_mask;
  // Inline Cache Register
  static OptoReg::Name inline_cache_reg();
  static int inline_cache_reg_encode();

  // Register for DIVI projection of divmodI
  static RegMask divI_proj_mask();
  // Register for MODI projection of divmodI
  static RegMask modI_proj_mask();

  // Register for DIVL projection of divmodL
  static RegMask divL_proj_mask();
  // Register for MODL projection of divmodL
  static RegMask modL_proj_mask();

  // Use the hardware DIV instruction when it is faster than
  // code which uses a multiply for division by a constant.
  static bool use_asm_for_ldiv_by_con( jlong divisor );

  static const RegMask method_handle_invoke_SP_save_mask();

  // Java-Interpreter calling convention
  // (what you use when calling between compiled-Java and Interpreted-Java)

  // Number of callee-save + always-save registers
  // Ignores frame pointer and "special" registers
  static int number_of_saved_registers();

  // The Method-klass-holder may be passed in the inline_cache_reg
  // and then expanded into the inline_cache_reg and a method_ptr register

  // Interpreter's Frame Pointer Register
  static OptoReg::Name interpreter_frame_pointer_reg();

  // Java-Native calling convention
  // (what you use when intercalling between Java and C++ code)

  // Frame pointer. The frame pointer is kept at the base of the stack
  // and so is probably the stack pointer for most machines. On Intel
  // it is ESP. On the PowerPC it is R1. On Sparc it is SP.
  OptoReg::Name c_frame_pointer() const;
  static RegMask c_frame_ptr_mask;

  // Java-Native vector calling convention
  static bool supports_vector_calling_convention();
  static OptoRegPair vector_return_value(uint ideal_reg);

  // Is this branch offset small enough to be addressed by a short branch?
  bool is_short_branch_offset(int rule, int br_size, int offset);

  // Should the input 'm' of node 'n' be cloned during matching?
  // Reports back whether the node was cloned or not.
  bool clone_node(Node* n, Node* m, Matcher::MStack& mstack);
  bool pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack);

  // Should the Matcher clone shifts on addressing modes, expecting them to
  // be subsumed into complex addressing expressions, or compute them into
  // registers? True for Intel but false for most RISCs.
  bool pd_clone_address_expressions(AddPNode* m, MStack& mstack, VectorSet& address_visited);
  // Clone base + offset address expression
  bool clone_base_plus_offset_address(AddPNode* m, MStack& mstack, VectorSet& address_visited);

  // Generate an implicit null check for narrow oops if it can fold
  // into the address expression (x64):
  //
  //   [R12 + narrow_oop_reg<<3 + offset]  // fold into address expression
  //   NullCheck narrow_oop_reg
  //
  // When narrow oops cannot fold into the address expression (Sparc) and the
  // base is not null, use decode_not_null and a normal implicit null check.
  // Note: the decode_not_null node can be used here since it is referenced
  // only on the non-null path, but it requires special handling; see
  // collect_null_checks():
  //
  //   decode_not_null narrow_oop_reg, oop_reg  // 'shift' and 'add base'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  // With a zero base, when narrow oops cannot fold into the address
  // expression, use a normal implicit null check since only a shift
  // is needed to decode the narrow oop:
  //
  //   decode narrow_oop_reg, oop_reg  // only 'shift'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  static bool gen_narrow_oop_implicit_null_checks();

private:
  void do_postselect_cleanup();

  void specialize_generic_vector_operands();
  void specialize_mach_node(MachNode* m);
  void specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx);
  MachOper* specialize_vector_operand(MachNode* m, uint opnd_idx);

  static MachOper* pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp);
  static bool is_reg2reg_move(MachNode* m);
  static bool is_generic_vector(MachOper* opnd);

  const RegMask* regmask_for_ideal_register(uint ideal_reg, Node* ret);
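
  // For orientation (illustrative, x86 naming): do_postselect_cleanup() walks
  // the matched graph, and the specialize_* routines above replace generic
  // vector operands emitted by the matcher with concrete ones, e.g. a generic
  // 'vec' operand becomes 'vecX' when the node's ideal register is Op_VecX.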

  // Graph verification code
  DEBUG_ONLY( bool verify_after_postselect_cleanup(); )

public:
  // This routine is run whenever a graph fails to match.
  // If it returns, the compiler should bail out to the interpreter without error.
  // In non-product mode, SoftMatchFailure is false to detect non-canonical
  // graphs. Print a message and exit.
  static void soft_match_failure() {
    if( SoftMatchFailure ) return;
    else { fatal("SoftMatchFailure is not allowed except in product"); }
  }

  // Check for a following volatile memory barrier with no intervening load;
  // in that case we don't need a barrier here. We retain the Node to act as
  // a compiler ordering barrier.
  static bool post_store_load_barrier(const Node* mb);

  // Does n lead to an uncommon trap that can cause deoptimization?
  static bool branches_to_uncommon_trap(const Node *n);

#ifndef PRODUCT
  // Record mach-to-Ideal mapping, reusing the Ideal IGV identifier if possible.
  void record_new2old(Node* newn, Node* old);

  void dump_old2new_map();  // machine-independent to machine-dependent

  Node* find_old_node(const Node* new_node) {
    return _new2old_map[new_node->_idx];
  }
#endif // !PRODUCT
};

#endif // SHARE_OPTO_MATCHER_HPP