1 //
    2 // Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2012, 2023 SAP SE. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 //
   27 // PPC64 Architecture Description File
   28 //
   29 
   30 //----------REGISTER DEFINITION BLOCK------------------------------------------
   31 // This information is used by the matcher and the register allocator to
   32 // describe individual registers and classes of registers within the target
   33 // architecture.
   34 register %{
   35 //----------Architecture Description Register Definitions----------------------
   36 // General Registers
   37 // "reg_def"  name (register save type, C convention save type,
   38 //                  ideal register type, encoding);
   39 //
   40 // Register Save Types:
   41 //
   42 //   NS  = No-Save:     The register allocator assumes that these registers
   43 //                      can be used without saving upon entry to the method, &
   44 //                      that they do not need to be saved at call sites.
   45 //
   46 //   SOC = Save-On-Call: The register allocator assumes that these registers
   47 //                      can be used without saving upon entry to the method,
   48 //                      but that they must be saved at call sites.
   49 //                      These are called "volatiles" on ppc.
   50 //
   51 //   SOE = Save-On-Entry: The register allocator assumes that these registers
   52 //                      must be saved before using them upon entry to the
   53 //                      method, but they do not need to be saved at call
   54 //                      sites.
   55 //                      These are called "nonvolatiles" on ppc.
   56 //
   57 //   AS  = Always-Save:   The register allocator assumes that these registers
   58 //                      must be saved before using them upon entry to the
   59 //                      method, & that they must be saved at call sites.
   60 //
   61 // Ideal Register Type is used to determine how to save & restore a
   62 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   63 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
   64 //
   65 // The encoding number is the actual bit-pattern placed into the opcodes.
   66 //
   67 // PPC64 register definitions, based on the 64-bit PowerPC ELF ABI
   68 // Supplement Version 1.7 as of 2003-10-29.
   69 //
   70 // For each 64-bit register we must define two registers: the register
   71 // itself, e.g. R3, and a corresponding virtual 32-bit 'half',
   72 // e.g. R3_H, which is needed by the allocator but is not used
   73 // for stores, loads, etc.
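// For example, a 64-bit (long or pointer) value allocated to R3 occupies the
// adjacent slots R3 and R3_H from the allocator's point of view, while only
// the real register R3 appears in the emitted load/store instructions.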
   74 
   75 // ----------------------------
   76 // Integer/Long Registers
   77 // ----------------------------
   78 
   79   // PPC64 has 32 64-bit integer registers.
   80 
   81   // types: v = volatile, nv = non-volatile, s = system
   82   reg_def R0   ( SOC, SOC, Op_RegI,  0, R0->as_VMReg()         );  // v   used in prologs
   83   reg_def R0_H ( SOC, SOC, Op_RegI, 99, R0->as_VMReg()->next() );
   84   reg_def R1   ( NS,  NS,  Op_RegI,  1, R1->as_VMReg()         );  // s   SP
   85   reg_def R1_H ( NS,  NS,  Op_RegI, 99, R1->as_VMReg()->next() );
   86   reg_def R2   ( SOC, SOC, Op_RegI,  2, R2->as_VMReg()         );  // v   TOC
   87   reg_def R2_H ( SOC, SOC, Op_RegI, 99, R2->as_VMReg()->next() );
   88   reg_def R3   ( SOC, SOC, Op_RegI,  3, R3->as_VMReg()         );  // v   iarg1 & iret
   89   reg_def R3_H ( SOC, SOC, Op_RegI, 99, R3->as_VMReg()->next() );
   90   reg_def R4   ( SOC, SOC, Op_RegI,  4, R4->as_VMReg()         );  // v   iarg2
   91   reg_def R4_H ( SOC, SOC, Op_RegI, 99, R4->as_VMReg()->next() );
   92   reg_def R5   ( SOC, SOC, Op_RegI,  5, R5->as_VMReg()         );  // v   iarg3
   93   reg_def R5_H ( SOC, SOC, Op_RegI, 99, R5->as_VMReg()->next() );
   94   reg_def R6   ( SOC, SOC, Op_RegI,  6, R6->as_VMReg()         );  // v   iarg4
   95   reg_def R6_H ( SOC, SOC, Op_RegI, 99, R6->as_VMReg()->next() );
   96   reg_def R7   ( SOC, SOC, Op_RegI,  7, R7->as_VMReg()         );  // v   iarg5
   97   reg_def R7_H ( SOC, SOC, Op_RegI, 99, R7->as_VMReg()->next() );
   98   reg_def R8   ( SOC, SOC, Op_RegI,  8, R8->as_VMReg()         );  // v   iarg6
   99   reg_def R8_H ( SOC, SOC, Op_RegI, 99, R8->as_VMReg()->next() );
  100   reg_def R9   ( SOC, SOC, Op_RegI,  9, R9->as_VMReg()         );  // v   iarg7
  101   reg_def R9_H ( SOC, SOC, Op_RegI, 99, R9->as_VMReg()->next() );
  102   reg_def R10  ( SOC, SOC, Op_RegI, 10, R10->as_VMReg()        );  // v   iarg8
  103   reg_def R10_H( SOC, SOC, Op_RegI, 99, R10->as_VMReg()->next());
  104   reg_def R11  ( SOC, SOC, Op_RegI, 11, R11->as_VMReg()        );  // v   ENV / scratch
  105   reg_def R11_H( SOC, SOC, Op_RegI, 99, R11->as_VMReg()->next());
  106   reg_def R12  ( SOC, SOC, Op_RegI, 12, R12->as_VMReg()        );  // v   scratch
  107   reg_def R12_H( SOC, SOC, Op_RegI, 99, R12->as_VMReg()->next());
  108   reg_def R13  ( NS,  NS,  Op_RegI, 13, R13->as_VMReg()        );  // s   system thread id
  109   reg_def R13_H( NS,  NS,  Op_RegI, 99, R13->as_VMReg()->next());
  110   reg_def R14  ( SOC, SOE, Op_RegI, 14, R14->as_VMReg()        );  // nv
  111   reg_def R14_H( SOC, SOE, Op_RegI, 99, R14->as_VMReg()->next());
  112   reg_def R15  ( SOC, SOE, Op_RegI, 15, R15->as_VMReg()        );  // nv
  113   reg_def R15_H( SOC, SOE, Op_RegI, 99, R15->as_VMReg()->next());
  114   reg_def R16  ( SOC, SOE, Op_RegI, 16, R16->as_VMReg()        );  // nv
  115   reg_def R16_H( SOC, SOE, Op_RegI, 99, R16->as_VMReg()->next());
  116   reg_def R17  ( SOC, SOE, Op_RegI, 17, R17->as_VMReg()        );  // nv
  117   reg_def R17_H( SOC, SOE, Op_RegI, 99, R17->as_VMReg()->next());
  118   reg_def R18  ( SOC, SOE, Op_RegI, 18, R18->as_VMReg()        );  // nv
  119   reg_def R18_H( SOC, SOE, Op_RegI, 99, R18->as_VMReg()->next());
  120   reg_def R19  ( SOC, SOE, Op_RegI, 19, R19->as_VMReg()        );  // nv
  121   reg_def R19_H( SOC, SOE, Op_RegI, 99, R19->as_VMReg()->next());
  122   reg_def R20  ( SOC, SOE, Op_RegI, 20, R20->as_VMReg()        );  // nv
  123   reg_def R20_H( SOC, SOE, Op_RegI, 99, R20->as_VMReg()->next());
  124   reg_def R21  ( SOC, SOE, Op_RegI, 21, R21->as_VMReg()        );  // nv
  125   reg_def R21_H( SOC, SOE, Op_RegI, 99, R21->as_VMReg()->next());
  126   reg_def R22  ( SOC, SOE, Op_RegI, 22, R22->as_VMReg()        );  // nv
  127   reg_def R22_H( SOC, SOE, Op_RegI, 99, R22->as_VMReg()->next());
  128   reg_def R23  ( SOC, SOE, Op_RegI, 23, R23->as_VMReg()        );  // nv
  129   reg_def R23_H( SOC, SOE, Op_RegI, 99, R23->as_VMReg()->next());
  130   reg_def R24  ( SOC, SOE, Op_RegI, 24, R24->as_VMReg()        );  // nv
  131   reg_def R24_H( SOC, SOE, Op_RegI, 99, R24->as_VMReg()->next());
  132   reg_def R25  ( SOC, SOE, Op_RegI, 25, R25->as_VMReg()        );  // nv
  133   reg_def R25_H( SOC, SOE, Op_RegI, 99, R25->as_VMReg()->next());
  134   reg_def R26  ( SOC, SOE, Op_RegI, 26, R26->as_VMReg()        );  // nv
  135   reg_def R26_H( SOC, SOE, Op_RegI, 99, R26->as_VMReg()->next());
  136   reg_def R27  ( SOC, SOE, Op_RegI, 27, R27->as_VMReg()        );  // nv
  137   reg_def R27_H( SOC, SOE, Op_RegI, 99, R27->as_VMReg()->next());
  138   reg_def R28  ( SOC, SOE, Op_RegI, 28, R28->as_VMReg()        );  // nv
  139   reg_def R28_H( SOC, SOE, Op_RegI, 99, R28->as_VMReg()->next());
  140   reg_def R29  ( SOC, SOE, Op_RegI, 29, R29->as_VMReg()        );  // nv
  141   reg_def R29_H( SOC, SOE, Op_RegI, 99, R29->as_VMReg()->next());
  142   reg_def R30  ( SOC, SOE, Op_RegI, 30, R30->as_VMReg()        );  // nv
  143   reg_def R30_H( SOC, SOE, Op_RegI, 99, R30->as_VMReg()->next());
  144   reg_def R31  ( SOC, SOE, Op_RegI, 31, R31->as_VMReg()        );  // nv
  145   reg_def R31_H( SOC, SOE, Op_RegI, 99, R31->as_VMReg()->next());
  146 
  147 
  148 // ----------------------------
  149 // Float/Double Registers
  150 // ----------------------------
  151 
  152   // Double Registers
  153   // The rules of ADL require that double registers be defined in pairs.
  154   // Each pair must be two 32-bit values, but not necessarily a pair of
  155   // single float registers. In each pair, ADLC-assigned register numbers
  156   // must be adjacent, with the lower number even. Finally, when the
  157   // CPU stores such a register pair to memory, the word associated with
  158   // the lower ADLC-assigned number must be stored to the lower address.
  159 
  160   // PPC64 has 32 64-bit floating-point registers. Each can store a single
  161   // or double precision floating-point value.
  162 
  163   // types: v = volatile, nv = non-volatile, s = system
  164   reg_def F0   ( SOC, SOC, Op_RegF,  0, F0->as_VMReg()         );  // v   scratch
  165   reg_def F0_H ( SOC, SOC, Op_RegF, 99, F0->as_VMReg()->next() );
  166   reg_def F1   ( SOC, SOC, Op_RegF,  1, F1->as_VMReg()         );  // v   farg1 & fret
  167   reg_def F1_H ( SOC, SOC, Op_RegF, 99, F1->as_VMReg()->next() );
  168   reg_def F2   ( SOC, SOC, Op_RegF,  2, F2->as_VMReg()         );  // v   farg2
  169   reg_def F2_H ( SOC, SOC, Op_RegF, 99, F2->as_VMReg()->next() );
  170   reg_def F3   ( SOC, SOC, Op_RegF,  3, F3->as_VMReg()         );  // v   farg3
  171   reg_def F3_H ( SOC, SOC, Op_RegF, 99, F3->as_VMReg()->next() );
  172   reg_def F4   ( SOC, SOC, Op_RegF,  4, F4->as_VMReg()         );  // v   farg4
  173   reg_def F4_H ( SOC, SOC, Op_RegF, 99, F4->as_VMReg()->next() );
  174   reg_def F5   ( SOC, SOC, Op_RegF,  5, F5->as_VMReg()         );  // v   farg5
  175   reg_def F5_H ( SOC, SOC, Op_RegF, 99, F5->as_VMReg()->next() );
  176   reg_def F6   ( SOC, SOC, Op_RegF,  6, F6->as_VMReg()         );  // v   farg6
  177   reg_def F6_H ( SOC, SOC, Op_RegF, 99, F6->as_VMReg()->next() );
  178   reg_def F7   ( SOC, SOC, Op_RegF,  7, F7->as_VMReg()         );  // v   farg7
  179   reg_def F7_H ( SOC, SOC, Op_RegF, 99, F7->as_VMReg()->next() );
  180   reg_def F8   ( SOC, SOC, Op_RegF,  8, F8->as_VMReg()         );  // v   farg8
  181   reg_def F8_H ( SOC, SOC, Op_RegF, 99, F8->as_VMReg()->next() );
  182   reg_def F9   ( SOC, SOC, Op_RegF,  9, F9->as_VMReg()         );  // v   farg9
  183   reg_def F9_H ( SOC, SOC, Op_RegF, 99, F9->as_VMReg()->next() );
  184   reg_def F10  ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()        );  // v   farg10
  185   reg_def F10_H( SOC, SOC, Op_RegF, 99, F10->as_VMReg()->next());
  186   reg_def F11  ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()        );  // v   farg11
  187   reg_def F11_H( SOC, SOC, Op_RegF, 99, F11->as_VMReg()->next());
  188   reg_def F12  ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()        );  // v   farg12
  189   reg_def F12_H( SOC, SOC, Op_RegF, 99, F12->as_VMReg()->next());
  190   reg_def F13  ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()        );  // v   farg13
  191   reg_def F13_H( SOC, SOC, Op_RegF, 99, F13->as_VMReg()->next());
  192   reg_def F14  ( SOC, SOE, Op_RegF, 14, F14->as_VMReg()        );  // nv
  193   reg_def F14_H( SOC, SOE, Op_RegF, 99, F14->as_VMReg()->next());
  194   reg_def F15  ( SOC, SOE, Op_RegF, 15, F15->as_VMReg()        );  // nv
  195   reg_def F15_H( SOC, SOE, Op_RegF, 99, F15->as_VMReg()->next());
  196   reg_def F16  ( SOC, SOE, Op_RegF, 16, F16->as_VMReg()        );  // nv
  197   reg_def F16_H( SOC, SOE, Op_RegF, 99, F16->as_VMReg()->next());
  198   reg_def F17  ( SOC, SOE, Op_RegF, 17, F17->as_VMReg()        );  // nv
  199   reg_def F17_H( SOC, SOE, Op_RegF, 99, F17->as_VMReg()->next());
  200   reg_def F18  ( SOC, SOE, Op_RegF, 18, F18->as_VMReg()        );  // nv
  201   reg_def F18_H( SOC, SOE, Op_RegF, 99, F18->as_VMReg()->next());
  202   reg_def F19  ( SOC, SOE, Op_RegF, 19, F19->as_VMReg()        );  // nv
  203   reg_def F19_H( SOC, SOE, Op_RegF, 99, F19->as_VMReg()->next());
  204   reg_def F20  ( SOC, SOE, Op_RegF, 20, F20->as_VMReg()        );  // nv
  205   reg_def F20_H( SOC, SOE, Op_RegF, 99, F20->as_VMReg()->next());
  206   reg_def F21  ( SOC, SOE, Op_RegF, 21, F21->as_VMReg()        );  // nv
  207   reg_def F21_H( SOC, SOE, Op_RegF, 99, F21->as_VMReg()->next());
  208   reg_def F22  ( SOC, SOE, Op_RegF, 22, F22->as_VMReg()        );  // nv
  209   reg_def F22_H( SOC, SOE, Op_RegF, 99, F22->as_VMReg()->next());
  210   reg_def F23  ( SOC, SOE, Op_RegF, 23, F23->as_VMReg()        );  // nv
  211   reg_def F23_H( SOC, SOE, Op_RegF, 99, F23->as_VMReg()->next());
  212   reg_def F24  ( SOC, SOE, Op_RegF, 24, F24->as_VMReg()        );  // nv
  213   reg_def F24_H( SOC, SOE, Op_RegF, 99, F24->as_VMReg()->next());
  214   reg_def F25  ( SOC, SOE, Op_RegF, 25, F25->as_VMReg()        );  // nv
  215   reg_def F25_H( SOC, SOE, Op_RegF, 99, F25->as_VMReg()->next());
  216   reg_def F26  ( SOC, SOE, Op_RegF, 26, F26->as_VMReg()        );  // nv
  217   reg_def F26_H( SOC, SOE, Op_RegF, 99, F26->as_VMReg()->next());
  218   reg_def F27  ( SOC, SOE, Op_RegF, 27, F27->as_VMReg()        );  // nv
  219   reg_def F27_H( SOC, SOE, Op_RegF, 99, F27->as_VMReg()->next());
  220   reg_def F28  ( SOC, SOE, Op_RegF, 28, F28->as_VMReg()        );  // nv
  221   reg_def F28_H( SOC, SOE, Op_RegF, 99, F28->as_VMReg()->next());
  222   reg_def F29  ( SOC, SOE, Op_RegF, 29, F29->as_VMReg()        );  // nv
  223   reg_def F29_H( SOC, SOE, Op_RegF, 99, F29->as_VMReg()->next());
  224   reg_def F30  ( SOC, SOE, Op_RegF, 30, F30->as_VMReg()        );  // nv
  225   reg_def F30_H( SOC, SOE, Op_RegF, 99, F30->as_VMReg()->next());
  226   reg_def F31  ( SOC, SOE, Op_RegF, 31, F31->as_VMReg()        );  // nv
  227   reg_def F31_H( SOC, SOE, Op_RegF, 99, F31->as_VMReg()->next());
  228 
  229 // ----------------------------
  230 // Special Registers
  231 // ----------------------------
  232 
  233 // Condition Codes Flag Registers
  234 
  235   // PPC64 has 8 condition code "registers" which are all contained
  236   // in the CR register.
  237 
  238   // types: v = volatile, nv = non-volatile, s = system
  239   reg_def CCR0(SOC, SOC, Op_RegFlags, 0, CCR0->as_VMReg());  // v
  240   reg_def CCR1(SOC, SOC, Op_RegFlags, 1, CCR1->as_VMReg());  // v
  241   reg_def CCR2(SOC, SOC, Op_RegFlags, 2, CCR2->as_VMReg());  // nv
  242   reg_def CCR3(SOC, SOC, Op_RegFlags, 3, CCR3->as_VMReg());  // nv
  243   reg_def CCR4(SOC, SOC, Op_RegFlags, 4, CCR4->as_VMReg());  // nv
  244   reg_def CCR5(SOC, SOC, Op_RegFlags, 5, CCR5->as_VMReg());  // v
  245   reg_def CCR6(SOC, SOC, Op_RegFlags, 6, CCR6->as_VMReg());  // v
  246   reg_def CCR7(SOC, SOC, Op_RegFlags, 7, CCR7->as_VMReg());  // v
  247 
  248   // Special registers of PPC64
  249 
  250   reg_def SR_XER(    SOC, SOC, Op_RegP, 0, SR_XER->as_VMReg());     // v
  251   reg_def SR_LR(     SOC, SOC, Op_RegP, 1, SR_LR->as_VMReg());      // v
  252   reg_def SR_CTR(    SOC, SOC, Op_RegP, 2, SR_CTR->as_VMReg());     // v
  253   reg_def SR_VRSAVE( SOC, SOC, Op_RegP, 3, SR_VRSAVE->as_VMReg());  // v
  254   reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v
  255   reg_def SR_PPR(    SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg());     // v
  256 
  257 // ----------------------------
  258 // Vector-Scalar Registers
  259 // ----------------------------
  260   // 1st 32 VSRs are aliases for the FPRs which are already defined above.
  261   reg_def VSR0 ( SOC, SOC, Op_VecX, 0, VMRegImpl::Bad());
  262   reg_def VSR1 ( SOC, SOC, Op_VecX, 1, VMRegImpl::Bad());
  263   reg_def VSR2 ( SOC, SOC, Op_VecX, 2, VMRegImpl::Bad());
  264   reg_def VSR3 ( SOC, SOC, Op_VecX, 3, VMRegImpl::Bad());
  265   reg_def VSR4 ( SOC, SOC, Op_VecX, 4, VMRegImpl::Bad());
  266   reg_def VSR5 ( SOC, SOC, Op_VecX, 5, VMRegImpl::Bad());
  267   reg_def VSR6 ( SOC, SOC, Op_VecX, 6, VMRegImpl::Bad());
  268   reg_def VSR7 ( SOC, SOC, Op_VecX, 7, VMRegImpl::Bad());
  269   reg_def VSR8 ( SOC, SOC, Op_VecX, 8, VMRegImpl::Bad());
  270   reg_def VSR9 ( SOC, SOC, Op_VecX, 9, VMRegImpl::Bad());
  271   reg_def VSR10 ( SOC, SOC, Op_VecX, 10, VMRegImpl::Bad());
  272   reg_def VSR11 ( SOC, SOC, Op_VecX, 11, VMRegImpl::Bad());
  273   reg_def VSR12 ( SOC, SOC, Op_VecX, 12, VMRegImpl::Bad());
  274   reg_def VSR13 ( SOC, SOC, Op_VecX, 13, VMRegImpl::Bad());
  275   reg_def VSR14 ( SOC, SOE, Op_VecX, 14, VMRegImpl::Bad());
  276   reg_def VSR15 ( SOC, SOE, Op_VecX, 15, VMRegImpl::Bad());
  277   reg_def VSR16 ( SOC, SOE, Op_VecX, 16, VMRegImpl::Bad());
  278   reg_def VSR17 ( SOC, SOE, Op_VecX, 17, VMRegImpl::Bad());
  279   reg_def VSR18 ( SOC, SOE, Op_VecX, 18, VMRegImpl::Bad());
  280   reg_def VSR19 ( SOC, SOE, Op_VecX, 19, VMRegImpl::Bad());
  281   reg_def VSR20 ( SOC, SOE, Op_VecX, 20, VMRegImpl::Bad());
  282   reg_def VSR21 ( SOC, SOE, Op_VecX, 21, VMRegImpl::Bad());
  283   reg_def VSR22 ( SOC, SOE, Op_VecX, 22, VMRegImpl::Bad());
  284   reg_def VSR23 ( SOC, SOE, Op_VecX, 23, VMRegImpl::Bad());
  285   reg_def VSR24 ( SOC, SOE, Op_VecX, 24, VMRegImpl::Bad());
  286   reg_def VSR25 ( SOC, SOE, Op_VecX, 25, VMRegImpl::Bad());
  287   reg_def VSR26 ( SOC, SOE, Op_VecX, 26, VMRegImpl::Bad());
  288   reg_def VSR27 ( SOC, SOE, Op_VecX, 27, VMRegImpl::Bad());
  289   reg_def VSR28 ( SOC, SOE, Op_VecX, 28, VMRegImpl::Bad());
  290   reg_def VSR29 ( SOC, SOE, Op_VecX, 29, VMRegImpl::Bad());
  291   reg_def VSR30 ( SOC, SOE, Op_VecX, 30, VMRegImpl::Bad());
  292   reg_def VSR31 ( SOC, SOE, Op_VecX, 31, VMRegImpl::Bad());
  293   // 2nd 32 VSRs are aliases for the VRs which are only defined here.
  294   reg_def VSR32 ( SOC, SOC, Op_VecX, 32, VSR32->as_VMReg());
  295   reg_def VSR33 ( SOC, SOC, Op_VecX, 33, VSR33->as_VMReg());
  296   reg_def VSR34 ( SOC, SOC, Op_VecX, 34, VSR34->as_VMReg());
  297   reg_def VSR35 ( SOC, SOC, Op_VecX, 35, VSR35->as_VMReg());
  298   reg_def VSR36 ( SOC, SOC, Op_VecX, 36, VSR36->as_VMReg());
  299   reg_def VSR37 ( SOC, SOC, Op_VecX, 37, VSR37->as_VMReg());
  300   reg_def VSR38 ( SOC, SOC, Op_VecX, 38, VSR38->as_VMReg());
  301   reg_def VSR39 ( SOC, SOC, Op_VecX, 39, VSR39->as_VMReg());
  302   reg_def VSR40 ( SOC, SOC, Op_VecX, 40, VSR40->as_VMReg());
  303   reg_def VSR41 ( SOC, SOC, Op_VecX, 41, VSR41->as_VMReg());
  304   reg_def VSR42 ( SOC, SOC, Op_VecX, 42, VSR42->as_VMReg());
  305   reg_def VSR43 ( SOC, SOC, Op_VecX, 43, VSR43->as_VMReg());
  306   reg_def VSR44 ( SOC, SOC, Op_VecX, 44, VSR44->as_VMReg());
  307   reg_def VSR45 ( SOC, SOC, Op_VecX, 45, VSR45->as_VMReg());
  308   reg_def VSR46 ( SOC, SOC, Op_VecX, 46, VSR46->as_VMReg());
  309   reg_def VSR47 ( SOC, SOC, Op_VecX, 47, VSR47->as_VMReg());
  310   reg_def VSR48 ( SOC, SOC, Op_VecX, 48, VSR48->as_VMReg());
  311   reg_def VSR49 ( SOC, SOC, Op_VecX, 49, VSR49->as_VMReg());
  312   reg_def VSR50 ( SOC, SOC, Op_VecX, 50, VSR50->as_VMReg());
  313   reg_def VSR51 ( SOC, SOC, Op_VecX, 51, VSR51->as_VMReg());
  314   reg_def VSR52 ( SOC, SOE, Op_VecX, 52, VSR52->as_VMReg());
  315   reg_def VSR53 ( SOC, SOE, Op_VecX, 53, VSR53->as_VMReg());
  316   reg_def VSR54 ( SOC, SOE, Op_VecX, 54, VSR54->as_VMReg());
  317   reg_def VSR55 ( SOC, SOE, Op_VecX, 55, VSR55->as_VMReg());
  318   reg_def VSR56 ( SOC, SOE, Op_VecX, 56, VSR56->as_VMReg());
  319   reg_def VSR57 ( SOC, SOE, Op_VecX, 57, VSR57->as_VMReg());
  320   reg_def VSR58 ( SOC, SOE, Op_VecX, 58, VSR58->as_VMReg());
  321   reg_def VSR59 ( SOC, SOE, Op_VecX, 59, VSR59->as_VMReg());
  322   reg_def VSR60 ( SOC, SOE, Op_VecX, 60, VSR60->as_VMReg());
  323   reg_def VSR61 ( SOC, SOE, Op_VecX, 61, VSR61->as_VMReg());
  324   reg_def VSR62 ( SOC, SOE, Op_VecX, 62, VSR62->as_VMReg());
  325   reg_def VSR63 ( SOC, SOE, Op_VecX, 63, VSR63->as_VMReg());
  326 
  327 // ----------------------------
  328 // Specify priority of register selection within phases of register
  329 // allocation. Highest priority is first. A useful heuristic is to
  330 // give registers a low priority when they are required by machine
  331 // instructions, like EAX and EDX on I486, and choose no-save registers
  332 // before save-on-call, & save-on-call before save-on-entry. Registers
  333 // which participate in fixed calling sequences should come last.
  334 // Registers which are used as pairs must fall on an even boundary.
  335 
  336 // It's worth about 1% on SPEC geomean to get this right.
  337 
  338 // Chunk0, chunk1, and chunk2 form the MachRegisterNumbers enumeration
  339 // in adGlobals_ppc.hpp which defines the <register>_num values, e.g.
  340 // R3_num. Therefore, R3_num may not be (and in reality is not)
  341 // the same as R3->encoding()! Furthermore, we cannot make any
  342 // assumptions on ordering, e.g. R3_num may be less than R2_num.
  343 // Additionally, the function
  344 //   static enum RC rc_class(OptoReg::Name reg )
  345 // maps a given <register>_num value to its chunk type (except for flags)
  346 // and its current implementation relies on chunk0 and chunk1 having a
  347 // size of 64 each.
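// For illustration: code must therefore translate between the two numberings
// explicitly, e.g. use R3_num (an OptoReg::Name from this enumeration) when
// talking to the register allocator, but R3->encoding() (== 3) when emitting
// instruction bits; the two values need not be related.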
  348 
  349 // If you change this allocation class, please have a look at the
  350 // default values for the parameters RoundRobinIntegerRegIntervalStart
  351 // and RoundRobinFloatRegIntervalStart
  352 
  353 alloc_class chunk0 (
  354   // Chunk0 contains *all* 64 integer register halves.
  355 
  356   // "non-volatile" registers
  357   R14, R14_H,
  358   R15, R15_H,
  359   R17, R17_H,
  360   R18, R18_H,
  361   R19, R19_H,
  362   R20, R20_H,
  363   R21, R21_H,
  364   R22, R22_H,
  365   R23, R23_H,
  366   R24, R24_H,
  367   R25, R25_H,
  368   R26, R26_H,
  369   R27, R27_H,
  370   R28, R28_H,
  371   R29, R29_H,
  372   R30, R30_H,
  373   R31, R31_H,
  374 
  375   // scratch/special registers
  376   R11, R11_H,
  377   R12, R12_H,
  378 
  379   // argument registers
  380   R10, R10_H,
  381   R9,  R9_H,
  382   R8,  R8_H,
  383   R7,  R7_H,
  384   R6,  R6_H,
  385   R5,  R5_H,
  386   R4,  R4_H,
  387   R3,  R3_H,
  388 
  389   // special registers, not available for allocation
  390   R16, R16_H,     // R16_thread
  391   R13, R13_H,     // system thread id
  392   R2,  R2_H,      // may be used for TOC
  393   R1,  R1_H,      // SP
  394   R0,  R0_H       // R0 (scratch)
  395 );
  396 
  397 // If you change this allocation class, please have a look at the
  398 // default values for the parameters RoundRobinIntegerRegIntervalStart
  399 // and RoundRobinFloatRegIntervalStart
  400 
  401 alloc_class chunk1 (
  402   // Chunk1 contains *all* 64 floating-point register halves.
  403 
  404   // scratch register
  405   F0,  F0_H,
  406 
  407   // argument registers
  408   F13, F13_H,
  409   F12, F12_H,
  410   F11, F11_H,
  411   F10, F10_H,
  412   F9,  F9_H,
  413   F8,  F8_H,
  414   F7,  F7_H,
  415   F6,  F6_H,
  416   F5,  F5_H,
  417   F4,  F4_H,
  418   F3,  F3_H,
  419   F2,  F2_H,
  420   F1,  F1_H,
  421 
  422   // non-volatile registers
  423   F14, F14_H,
  424   F15, F15_H,
  425   F16, F16_H,
  426   F17, F17_H,
  427   F18, F18_H,
  428   F19, F19_H,
  429   F20, F20_H,
  430   F21, F21_H,
  431   F22, F22_H,
  432   F23, F23_H,
  433   F24, F24_H,
  434   F25, F25_H,
  435   F26, F26_H,
  436   F27, F27_H,
  437   F28, F28_H,
  438   F29, F29_H,
  439   F30, F30_H,
  440   F31, F31_H
  441 );
  442 
  443 alloc_class chunk2 (
  444   // Chunk2 contains *all* 8 condition code registers.
  445 
  446   CCR0,
  447   CCR1,
  448   CCR2,
  449   CCR3,
  450   CCR4,
  451   CCR5,
  452   CCR6,
  453   CCR7
  454 );
  455 
  456 alloc_class chunk3 (
  457   VSR0,
  458   VSR1,
  459   VSR2,
  460   VSR3,
  461   VSR4,
  462   VSR5,
  463   VSR6,
  464   VSR7,
  465   VSR8,
  466   VSR9,
  467   VSR10,
  468   VSR11,
  469   VSR12,
  470   VSR13,
  471   VSR14,
  472   VSR15,
  473   VSR16,
  474   VSR17,
  475   VSR18,
  476   VSR19,
  477   VSR20,
  478   VSR21,
  479   VSR22,
  480   VSR23,
  481   VSR24,
  482   VSR25,
  483   VSR26,
  484   VSR27,
  485   VSR28,
  486   VSR29,
  487   VSR30,
  488   VSR31,
  489   VSR32,
  490   VSR33,
  491   VSR34,
  492   VSR35,
  493   VSR36,
  494   VSR37,
  495   VSR38,
  496   VSR39,
  497   VSR40,
  498   VSR41,
  499   VSR42,
  500   VSR43,
  501   VSR44,
  502   VSR45,
  503   VSR46,
  504   VSR47,
  505   VSR48,
  506   VSR49,
  507   VSR50,
  508   VSR51,
  509   VSR52,
  510   VSR53,
  511   VSR54,
  512   VSR55,
  513   VSR56,
  514   VSR57,
  515   VSR58,
  516   VSR59,
  517   VSR60,
  518   VSR61,
  519   VSR62,
  520   VSR63
  521 );
  522 
  523 alloc_class chunk4 (
  524   // special registers
  525   // These registers are not allocated, but used for nodes generated by postalloc expand.
  526   SR_XER,
  527   SR_LR,
  528   SR_CTR,
  529   SR_VRSAVE,
  530   SR_SPEFSCR,
  531   SR_PPR
  532 );
  533 
  534 //-------Architecture Description Register Classes-----------------------
  535 
  536 // Several register classes are automatically defined based upon
  537 // information in this architecture description.
  538 
  539 // 1) reg_class inline_cache_reg           ( as defined in frame section )
  540 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  541 //
  542 
  543 // ----------------------------
  544 // 32 Bit Register Classes
  545 // ----------------------------
  546 
  547 // We specify registers twice, once as read/write, and once read-only.
  548 // We use the read-only registers for source operands. With this, we
  549 // can include preset read only registers in this class, as a hard-coded
  550 // '0'-register. (We used to simulate this on ppc.)
  551 
  552 // 32 bit registers that can be read and written i.e. these registers
  553 // can be dest (or src) of normal instructions.
  554 reg_class bits32_reg_rw(
  555 /*R0*/              // R0
  556 /*R1*/              // SP
  557   R2,               // TOC
  558   R3,
  559   R4,
  560   R5,
  561   R6,
  562   R7,
  563   R8,
  564   R9,
  565   R10,
  566   R11,
  567   R12,
  568 /*R13*/             // system thread id
  569   R14,
  570   R15,
  571 /*R16*/             // R16_thread
  572   R17,
  573   R18,
  574   R19,
  575   R20,
  576   R21,
  577   R22,
  578   R23,
  579   R24,
  580   R25,
  581   R26,
  582   R27,
  583   R28,
  584 /*R29,*/             // global TOC
  585   R30,
  586   R31
  587 );
  588 
  589 // 32 bit registers that can only be read i.e. these registers can
  590 // only be src of all instructions.
  591 reg_class bits32_reg_ro(
  592 /*R0*/              // R0
  593 /*R1*/              // SP
  594   R2,               // TOC
  595   R3,
  596   R4,
  597   R5,
  598   R6,
  599   R7,
  600   R8,
  601   R9,
  602   R10,
  603   R11,
  604   R12,
  605 /*R13*/             // system thread id
  606   R14,
  607   R15,
  608 /*R16*/             // R16_thread
  609   R17,
  610   R18,
  611   R19,
  612   R20,
  613   R21,
  614   R22,
  615   R23,
  616   R24,
  617   R25,
  618   R26,
  619   R27,
  620   R28,
  621 /*R29,*/
  622   R30,
  623   R31
  624 );
  625 
  626 reg_class rscratch1_bits32_reg(R11);
  627 reg_class rscratch2_bits32_reg(R12);
  628 reg_class rarg1_bits32_reg(R3);
  629 reg_class rarg2_bits32_reg(R4);
  630 reg_class rarg3_bits32_reg(R5);
  631 reg_class rarg4_bits32_reg(R6);
  632 
  633 // ----------------------------
  634 // 64 Bit Register Classes
  635 // ----------------------------
  636 // 64-bit build means 64-bit pointers means hi/lo pairs
  637 
  638 reg_class rscratch1_bits64_reg(R11_H, R11);
  639 reg_class rscratch2_bits64_reg(R12_H, R12);
  640 reg_class rarg1_bits64_reg(R3_H, R3);
  641 reg_class rarg2_bits64_reg(R4_H, R4);
  642 reg_class rarg3_bits64_reg(R5_H, R5);
  643 reg_class rarg4_bits64_reg(R6_H, R6);
  644 reg_class rarg5_bits64_reg(R7_H, R7);
  645 reg_class rarg6_bits64_reg(R8_H, R8);
  646 // Thread register, 'written' by tlsLoadP, see there.
  647 reg_class thread_bits64_reg(R16_H, R16);
  648 
  649 reg_class r19_bits64_reg(R19_H, R19);
  650 
  651 // 64 bit registers that can be read and written i.e. these registers
  652 // can be dest (or src) of normal instructions.
  653 reg_class bits64_reg_rw(
  654 /*R0_H,  R0*/     // R0
  655 /*R1_H,  R1*/     // SP
  656   R2_H,  R2,      // TOC
  657   R3_H,  R3,
  658   R4_H,  R4,
  659   R5_H,  R5,
  660   R6_H,  R6,
  661   R7_H,  R7,
  662   R8_H,  R8,
  663   R9_H,  R9,
  664   R10_H, R10,
  665   R11_H, R11,
  666   R12_H, R12,
  667 /*R13_H, R13*/   // system thread id
  668   R14_H, R14,
  669   R15_H, R15,
  670 /*R16_H, R16*/   // R16_thread
  671   R17_H, R17,
  672   R18_H, R18,
  673   R19_H, R19,
  674   R20_H, R20,
  675   R21_H, R21,
  676   R22_H, R22,
  677   R23_H, R23,
  678   R24_H, R24,
  679   R25_H, R25,
  680   R26_H, R26,
  681   R27_H, R27,
  682   R28_H, R28,
  683 /*R29_H, R29,*/
  684   R30_H, R30,
  685   R31_H, R31
  686 );
  687 
  688 // 64 bit registers used excluding r2, r11 and r12
  689 // Used to hold the TOC to avoid collisions with expanded LeafCall which uses
  690 // r2, r11 and r12 internally.
  691 reg_class bits64_reg_leaf_call(
  692 /*R0_H,  R0*/     // R0
  693 /*R1_H,  R1*/     // SP
  694 /*R2_H,  R2*/     // TOC
  695   R3_H,  R3,
  696   R4_H,  R4,
  697   R5_H,  R5,
  698   R6_H,  R6,
  699   R7_H,  R7,
  700   R8_H,  R8,
  701   R9_H,  R9,
  702   R10_H, R10,
  703 /*R11_H, R11*/
  704 /*R12_H, R12*/
  705 /*R13_H, R13*/   // system thread id
  706   R14_H, R14,
  707   R15_H, R15,
  708 /*R16_H, R16*/   // R16_thread
  709   R17_H, R17,
  710   R18_H, R18,
  711   R19_H, R19,
  712   R20_H, R20,
  713   R21_H, R21,
  714   R22_H, R22,
  715   R23_H, R23,
  716   R24_H, R24,
  717   R25_H, R25,
  718   R26_H, R26,
  719   R27_H, R27,
  720   R28_H, R28,
  721 /*R29_H, R29,*/
  722   R30_H, R30,
  723   R31_H, R31
  724 );
  725 
  726 // Used to hold the TOC to avoid collisions with expanded DynamicCall
  727 // which uses r19 as inline cache internally and expanded LeafCall which uses
  728 // r2, r11 and r12 internally.
  729 reg_class bits64_constant_table_base(
  730 /*R0_H,  R0*/     // R0
  731 /*R1_H,  R1*/     // SP
  732 /*R2_H,  R2*/     // TOC
  733   R3_H,  R3,
  734   R4_H,  R4,
  735   R5_H,  R5,
  736   R6_H,  R6,
  737   R7_H,  R7,
  738   R8_H,  R8,
  739   R9_H,  R9,
  740   R10_H, R10,
  741 /*R11_H, R11*/
  742 /*R12_H, R12*/
  743 /*R13_H, R13*/   // system thread id
  744   R14_H, R14,
  745   R15_H, R15,
  746 /*R16_H, R16*/   // R16_thread
  747   R17_H, R17,
  748   R18_H, R18,
  749 /*R19_H, R19*/
  750   R20_H, R20,
  751   R21_H, R21,
  752   R22_H, R22,
  753   R23_H, R23,
  754   R24_H, R24,
  755   R25_H, R25,
  756   R26_H, R26,
  757   R27_H, R27,
  758   R28_H, R28,
  759 /*R29_H, R29,*/
  760   R30_H, R30,
  761   R31_H, R31
  762 );
  763 
  764 // 64 bit registers that can only be read i.e. these registers can
  765 // only be src of all instructions.
  766 reg_class bits64_reg_ro(
  767 /*R0_H,  R0*/     // R0
  768   R1_H,  R1,
  769   R2_H,  R2,       // TOC
  770   R3_H,  R3,
  771   R4_H,  R4,
  772   R5_H,  R5,
  773   R6_H,  R6,
  774   R7_H,  R7,
  775   R8_H,  R8,
  776   R9_H,  R9,
  777   R10_H, R10,
  778   R11_H, R11,
  779   R12_H, R12,
  780 /*R13_H, R13*/   // system thread id
  781   R14_H, R14,
  782   R15_H, R15,
  783   R16_H, R16,    // R16_thread
  784   R17_H, R17,
  785   R18_H, R18,
  786   R19_H, R19,
  787   R20_H, R20,
  788   R21_H, R21,
  789   R22_H, R22,
  790   R23_H, R23,
  791   R24_H, R24,
  792   R25_H, R25,
  793   R26_H, R26,
  794   R27_H, R27,
  795   R28_H, R28,
  796 /*R29_H, R29,*/ // TODO: let allocator handle TOC!!
  797   R30_H, R30,
  798   R31_H, R31
  799 );
  800 
  801 
  802 // ----------------------------
  803 // Special Class for Condition Code Flags Register
  804 
  805 reg_class int_flags(
  806 /*CCR0*/             // scratch
  807 /*CCR1*/             // scratch
  808 /*CCR2*/             // nv!
  809 /*CCR3*/             // nv!
  810 /*CCR4*/             // nv!
  811   CCR5,
  812   CCR6,
  813   CCR7
  814 );
  815 
  816 reg_class int_flags_ro(
  817   CCR0,
  818   CCR1,
  819   CCR2,
  820   CCR3,
  821   CCR4,
  822   CCR5,
  823   CCR6,
  824   CCR7
  825 );
  826 
  827 reg_class int_flags_CR0(CCR0);
  828 reg_class int_flags_CR1(CCR1);
  829 reg_class int_flags_CR6(CCR6);
  830 reg_class ctr_reg(SR_CTR);
  831 
  832 // ----------------------------
  833 // Float Register Classes
  834 // ----------------------------
  835 
  836 reg_class flt_reg(
  837   F0,
  838   F1,
  839   F2,
  840   F3,
  841   F4,
  842   F5,
  843   F6,
  844   F7,
  845   F8,
  846   F9,
  847   F10,
  848   F11,
  849   F12,
  850   F13,
  851   F14,              // nv!
  852   F15,              // nv!
  853   F16,              // nv!
  854   F17,              // nv!
  855   F18,              // nv!
  856   F19,              // nv!
  857   F20,              // nv!
  858   F21,              // nv!
  859   F22,              // nv!
  860   F23,              // nv!
  861   F24,              // nv!
  862   F25,              // nv!
  863   F26,              // nv!
  864   F27,              // nv!
  865   F28,              // nv!
  866   F29,              // nv!
  867   F30,              // nv!
  868   F31               // nv!
  869 );
  870 
  871 // Double precision float registers have virtual `high halves' that
  872 // are needed by the allocator.
  873 reg_class dbl_reg(
  874   F0,  F0_H,
  875   F1,  F1_H,
  876   F2,  F2_H,
  877   F3,  F3_H,
  878   F4,  F4_H,
  879   F5,  F5_H,
  880   F6,  F6_H,
  881   F7,  F7_H,
  882   F8,  F8_H,
  883   F9,  F9_H,
  884   F10, F10_H,
  885   F11, F11_H,
  886   F12, F12_H,
  887   F13, F13_H,
  888   F14, F14_H,    // nv!
  889   F15, F15_H,    // nv!
  890   F16, F16_H,    // nv!
  891   F17, F17_H,    // nv!
  892   F18, F18_H,    // nv!
  893   F19, F19_H,    // nv!
  894   F20, F20_H,    // nv!
  895   F21, F21_H,    // nv!
  896   F22, F22_H,    // nv!
  897   F23, F23_H,    // nv!
  898   F24, F24_H,    // nv!
  899   F25, F25_H,    // nv!
  900   F26, F26_H,    // nv!
  901   F27, F27_H,    // nv!
  902   F28, F28_H,    // nv!
  903   F29, F29_H,    // nv!
  904   F30, F30_H,    // nv!
  905   F31, F31_H     // nv!
  906 );
  907 
  908 // ----------------------------
  909 // Vector-Scalar Register Class
  910 // ----------------------------
  911 
  912 reg_class vs_reg(
  913   // Attention: Only these ones are saved & restored at safepoint by RegisterSaver.
  914   VSR32,
  915   VSR33,
  916   VSR34,
  917   VSR35,
  918   VSR36,
  919   VSR37,
  920   VSR38,
  921   VSR39,
  922   VSR40,
  923   VSR41,
  924   VSR42,
  925   VSR43,
  926   VSR44,
  927   VSR45,
  928   VSR46,
  929   VSR47,
  930   VSR48,
  931   VSR49,
  932   VSR50,
  933   VSR51
  934   // VSR52-VSR63 // nv!
  935 );
  936 
  937  %}
  938 
  939 //----------DEFINITION BLOCK---------------------------------------------------
  940 // Define name --> value mappings to inform the ADLC of an integer valued name
  941 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  942 // Format:
  943 //        int_def  <name>         ( <int_value>, <expression>);
  944 // Generated Code in ad_<arch>.hpp
  945 //        #define  <name>   (<expression>)
  946 //        // value == <int_value>
  947 // Generated code in ad_<arch>.cpp adlc_verification()
  948 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  949 //
  950 definitions %{
  951   // The default cost (of an ALU instruction).
  952   int_def DEFAULT_COST_LOW        (     30,      30);
  953   int_def DEFAULT_COST            (    100,     100);
  954   int_def HUGE_COST               (1000000, 1000000);
  955 
  956   // Memory refs
  957   int_def MEMORY_REF_COST_LOW     (    200, DEFAULT_COST * 2);
  958   int_def MEMORY_REF_COST         (    300, DEFAULT_COST * 3);
  959 
  960   // Branches are even more expensive.
  961   int_def BRANCH_COST             (    900, DEFAULT_COST * 9);
  962   int_def CALL_COST               (   1300, DEFAULT_COST * 13);
  963 %}
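// For illustration (following the format described above), the
// MEMORY_REF_COST entry is expected to generate in ad_ppc.hpp roughly
//   #define  MEMORY_REF_COST   (DEFAULT_COST * 3)
//   // value == 300
// plus a matching assert in adlc_verification() in ad_ppc.cpp.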
  964 
  965 
  966 //----------SOURCE BLOCK-------------------------------------------------------
  967 // This is a block of C++ code which provides values, functions, and
  968 // definitions necessary in the rest of the architecture description.
  969 source_hpp %{
  970   // Header information of the source block.
  971   // Method declarations/definitions which are used outside
  972   // the ad-scope can conveniently be defined here.
  973   //
  974   // To keep related declarations/definitions/uses close together,
  975   // we switch between source %{ }% and source_hpp %{ }% freely as needed.
  976 
  977 #include "opto/convertnode.hpp"
  978 
  979   // Returns true if Node n is followed by a MemBar node that
  980   // will do an acquire. If so, this node must not do the acquire
  981   // operation.
  982   bool followed_by_acquire(const Node *n);
  983 %}
  984 
  985 source %{
  986 
  987 #include "opto/c2_CodeStubs.hpp"
  988 #include "oops/klass.inline.hpp"
  989 
  990 void PhaseOutput::pd_perform_mach_node_analysis() {
  991 }
  992 
  993 int MachNode::pd_alignment_required() const {
  994   return 1;
  995 }
  996 
  997 int MachNode::compute_padding(int current_offset) const {
  998   return 0;
  999 }
 1000 
 1001 // Should the matcher clone input 'm' of node 'n'?
 1002 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 1003   if (is_encode_and_store_pattern(n, m)) {
 1004     mstack.push(m, Visit);
 1005     return true;
 1006   }
 1007   return false;
 1008 }
 1009 
 1010 // Should the Matcher clone shifts on addressing modes, expecting them
 1011 // to be subsumed into complex addressing expressions or compute them
 1012 // into registers?
 1013 bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
 1014   return clone_base_plus_offset_address(m, mstack, address_visited);
 1015 }
 1016 
 1017 // Optimize load-acquire.
 1018 //
 1019 // Check if acquire is unnecessary due to following operation that does
 1020 // acquire anyways.
 1021 // Walk the pattern:
 1022 //
 1023 //      n: Load.acq
 1024 //           |
 1025 //      MemBarAcquire
 1026 //       |         |
 1027 //  Proj(ctrl)  Proj(mem)
 1028 //       |         |
 1029 //   MemBarRelease/Volatile
 1030 //
 1031 bool followed_by_acquire(const Node *load) {
 1032   assert(load->is_Load(), "So far implemented only for loads.");
 1033 
 1034   // Find MemBarAcquire.
 1035   const Node *mba = nullptr;
 1036   for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
 1037     const Node *out = load->fast_out(i);
 1038     if (out->Opcode() == Op_MemBarAcquire) {
 1039       if (out->in(0) == load) continue; // Skip control edge, membar should be found via precedence edge.
 1040       mba = out;
 1041       break;
 1042     }
 1043   }
 1044   if (!mba) return false;
 1045 
 1046   // Find following MemBar node.
 1047   //
 1048   // The following node must be reachable by control AND memory
 1049   // edge to assure no other operations are in between the two nodes.
 1050   //
 1051   // So first get the Proj node, mem_proj, to use it to iterate forward.
 1052   Node *mem_proj = nullptr;
 1053   for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) {
 1054     mem_proj = mba->fast_out(i);      // Runs out of bounds and asserts if Proj not found.
 1055     assert(mem_proj->is_Proj(), "only projections here");
 1056     ProjNode *proj = mem_proj->as_Proj();
 1057     if (proj->_con == TypeFunc::Memory &&
 1058         !Compile::current()->node_arena()->contains(mem_proj)) // Unmatched old-space only
 1059       break;
 1060   }
 1061   assert(mem_proj->as_Proj()->_con == TypeFunc::Memory, "Graph broken");
 1062 
 1063   // Search MemBar behind Proj. If there are other memory operations
 1064   // behind the Proj we lost.
 1065   for (DUIterator_Fast jmax, j = mem_proj->fast_outs(jmax); j < jmax; j++) {
 1066     Node *x = mem_proj->fast_out(j);
 1067     // Proj might have an edge to a store or load node which precedes the membar.
 1068     if (x->is_Mem()) return false;
 1069 
 1070     // On PPC64 release and volatile are implemented by an instruction
 1071     // that also has acquire semantics. I.e. there is no need for an
 1072     // acquire before these.
 1073     int xop = x->Opcode();
 1074     if (xop == Op_MemBarRelease || xop == Op_MemBarVolatile) {
 1075       // Make sure we're not missing Call/Phi/MergeMem by checking
 1076       // control edges. The control edge must directly lead back
 1077       // to the MemBarAcquire
 1078       Node *ctrl_proj = x->in(0);
 1079       if (ctrl_proj->is_Proj() && ctrl_proj->in(0) == mba) {
 1080         return true;
 1081       }
 1082     }
 1083   }
 1084 
 1085   return false;
 1086 }
 1087 
 1088 #define __ masm->
 1089 
 1090 // Tertiary op of a LoadP or StoreP encoding.
 1091 #define REGP_OP true
 1092 
 1093 // ****************************************************************************
 1094 
 1095 // REQUIRED FUNCTIONALITY
 1096 
 1097 // !!!!! Special hack to get all types of calls to specify the byte offset
 1098 //       from the start of the call to the point where the return address
 1099 //       will point.
 1100 
 1101 // PPC port: Removed use of lazy constant construct.
 1102 
 1103 int MachCallStaticJavaNode::ret_addr_offset() {
 1104   // It's only a single branch-and-link instruction.
 1105   return 4;
 1106 }
 1107 
 1108 int MachCallDynamicJavaNode::ret_addr_offset() {
 1109   // Offset is 4 with postalloc expanded calls (bl is one instruction). We use
 1110   // postalloc expanded calls if we use inline caches and do not update method data.
 1111   if (UseInlineCaches) return 4;
 1112 
 1113   int vtable_index = this->_vtable_index;
 1114   if (vtable_index < 0) {
 1115     // Must be invalid_vtable_index, not nonvirtual_vtable_index.
 1116     assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
 1117     return 12;
 1118   } else {
 1119     return 24 + MacroAssembler::instr_size_for_decode_klass_not_null();
 1120   }
 1121 }
 1122 
 1123 int MachCallRuntimeNode::ret_addr_offset() {
 1124   if (rule() == CallRuntimeDirect_rule) {
 1125     // CallRuntimeDirectNode uses call_c.
 1126 #if defined(ABI_ELFv2)
 1127     return 28;
 1128 #else
 1129     return 40;
 1130 #endif
 1131   }
 1132   assert(rule() == CallLeafDirect_rule, "unexpected node with rule %u", rule());
 1133   // CallLeafDirectNode uses bl.
 1134   return 4;
 1135 }
 1136 
 1137 //=============================================================================
 1138 
 1139 // condition code conversions
 1140 
 1141 static int cc_to_boint(int cc) {
 1142   return Assembler::bcondCRbiIs0 | (cc & 8);
 1143 }
 1144 
 1145 static int cc_to_inverse_boint(int cc) {
 1146   return Assembler::bcondCRbiIs0 | (8-(cc & 8));
 1147 }
 1148 
 1149 static int cc_to_biint(int cc, int flags_reg) {
 1150   return (flags_reg << 2) | (cc & 3);
 1151 }
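// Note on the helpers above: the low two bits of 'cc' select the bit within a
// CR field (lt/gt/eq/so) and bit 3 selects whether the branch tests that CR
// bit for being set or clear. Hence cc_to_biint() builds the BI field as
// 4 * <CR field number> + <bit index>, while cc_to_boint() and
// cc_to_inverse_boint() pick the corresponding (or inverted) BO encoding.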
 1152 
 1153 //=============================================================================
 1154 
 1155 // Compute padding required for nodes which need alignment. The padding
 1156 // is the number of bytes (not instructions) which will be inserted before
 1157 // the instruction. The padding must match the size of a NOP instruction.
 1158 
 1159 // Add nop if a prefixed (two-word) instruction is going to cross a 64-byte boundary.
 1160 // (See Section 1.6 of Power ISA Version 3.1)
 1161 static int compute_prefix_padding(int current_offset) {
 1162   assert(PowerArchitecturePPC64 >= 10 && (CodeEntryAlignment & 63) == 0,
 1163          "Code buffer must be aligned to a multiple of 64 bytes");
 1164   if (is_aligned(current_offset + BytesPerInstWord, 64)) {
 1165     return BytesPerInstWord;
 1166   }
 1167   return 0;
 1168 }
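// Example: with current_offset == 60, a prefixed instruction would straddle
// the next 64-byte boundary (words at offsets 60 and 64), so BytesPerInstWord
// (4 bytes, one nop) of padding is requested; at offsets 56 or 64 no padding
// is needed.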
 1169 
 1170 int loadConI32Node::compute_padding(int current_offset) const {
 1171   return compute_prefix_padding(current_offset);
 1172 }
 1173 
 1174 int loadConL34Node::compute_padding(int current_offset) const {
 1175   return compute_prefix_padding(current_offset);
 1176 }
 1177 
 1178 int addI_reg_imm32Node::compute_padding(int current_offset) const {
 1179   return compute_prefix_padding(current_offset);
 1180 }
 1181 
 1182 int addL_reg_imm34Node::compute_padding(int current_offset) const {
 1183   return compute_prefix_padding(current_offset);
 1184 }
 1185 
 1186 int addP_reg_imm34Node::compute_padding(int current_offset) const {
 1187   return compute_prefix_padding(current_offset);
 1188 }
 1189 
 1190 int cmprb_Whitespace_reg_reg_prefixedNode::compute_padding(int current_offset) const {
 1191   return compute_prefix_padding(current_offset);
 1192 }
 1193 
 1194 
 1195 //=============================================================================
 1196 
 1197 // Emit an interrupt that is caught by the debugger (for debugging compiler).
 1198 void emit_break(C2_MacroAssembler *masm) {
 1199   __ illtrap();
 1200 }
 1201 
 1202 #ifndef PRODUCT
 1203 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1204   st->print("BREAKPOINT");
 1205 }
 1206 #endif
 1207 
 1208 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1209   emit_break(masm);
 1210 }
 1211 
 1212 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1213   return MachNode::size(ra_);
 1214 }
 1215 
 1216 //=============================================================================
 1217 
 1218 void emit_nop(C2_MacroAssembler *masm) {
 1219   __ nop();
 1220 }
 1221 
 1222 static inline void emit_long(C2_MacroAssembler *masm, int value) {
 1223   *((int*)(__ pc())) = value;
 1224   __ set_inst_end(__ pc() + BytesPerInstWord);
 1225 }
 1226 
 1227 //=============================================================================
 1228 
 1229 %} // interrupt source
 1230 
 1231 source_hpp %{ // Header information of the source block.
 1232 
 1233 //--------------------------------------------------------------
 1234 //---<  Used for optimization in Compile::Shorten_branches  >---
 1235 //--------------------------------------------------------------
 1236 
 1237 class C2_MacroAssembler;
 1238 
 1239 class CallStubImpl {
 1240 
 1241  public:
 1242 
 1243   // Emit call stub, compiled java to interpreter.
 1244   static void emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset);
 1245 
 1246   // Size of call trampoline stub.
 1247   // This doesn't need to be accurate to the byte, but it
 1248   // must be larger than or equal to the real size of the stub.
 1249   static uint size_call_trampoline() {
 1250     return MacroAssembler::trampoline_stub_size;
 1251   }
 1252 
 1253   // number of relocations needed by a call trampoline stub
 1254   static uint reloc_call_trampoline() {
 1255     return 5;
 1256   }
 1257 
 1258 };
 1259 
 1260 %} // end source_hpp
 1261 
 1262 source %{
 1263 
 1264 // Emit a trampoline stub for a call to a target which is too far away.
 1265 //
 1266 // code sequences:
 1267 //
 1268 // call-site:
 1269 //   branch-and-link to <destination> or <trampoline stub>
 1270 //
 1271 // Related trampoline stub for this call-site in the stub section:
 1272 //   load the call target from the constant pool
 1273 //   branch via CTR (LR/link still points to the call-site above)
 1274 
 1275 void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset) {
 1276   address stub = __ emit_trampoline_stub(destination_toc_offset, insts_call_instruction_offset);
 1277   if (stub == nullptr) {
 1278     ciEnv::current()->record_out_of_memory_failure();
 1279   }
 1280 }
 1281 
 1282 //=============================================================================
 1283 
 1284 // Emit an inline branch-and-link call and a related trampoline stub.
 1285 //
 1286 // code sequences:
 1287 //
 1288 // call-site:
 1289 //   branch-and-link to <destination> or <trampoline stub>
 1290 //
 1291 // Related trampoline stub for this call-site in the stub section:
 1292 //   load the call target from the constant pool
 1293 //   branch via CTR (LR/link still points to the call-site above)
 1294 //
 1295 
 1296 typedef struct {
 1297   int insts_call_instruction_offset;
 1298   int ret_addr_offset;
 1299 } EmitCallOffsets;
 1300 
 1301 // Emit a branch-and-link instruction that branches to a trampoline.
 1302 // - Remember the offset of the branch-and-link instruction.
 1303 // - Add a relocation at the branch-and-link instruction.
 1304 // - Emit a branch-and-link.
 1305 // - Remember the return pc offset.
 1306 EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler *masm, address entry_point, relocInfo::relocType rtype) {
 1307   EmitCallOffsets offsets = { -1, -1 };
 1308   const int start_offset = __ offset();
 1309   offsets.insts_call_instruction_offset = __ offset();
 1310 
 1311   // No entry point given, use the current pc.
 1312   if (entry_point == nullptr) entry_point = __ pc();
 1313 
 1314   // Put the entry point as a constant into the constant pool.
 1315   const address entry_point_toc_addr   = __ address_constant(entry_point, RelocationHolder::none);
 1316   if (entry_point_toc_addr == nullptr) {
 1317     ciEnv::current()->record_out_of_memory_failure();
 1318     return offsets;
 1319   }
 1320   const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 1321 
 1322   // Emit the trampoline stub which will be related to the branch-and-link below.
 1323   CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
 1324   if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
 1325   __ relocate(rtype);
 1326 
 1327   // Note: At this point we do not have the address of the trampoline
 1328   // stub, and the entry point might be too far away for bl, so __ pc()
 1329   // serves as dummy and the bl will be patched later.
 1330   __ bl((address) __ pc());
 1331 
 1332   offsets.ret_addr_offset = __ offset() - start_offset;
 1333 
 1334   return offsets;
 1335 }
 1336 
 1337 //=============================================================================
 1338 
 1339 // Factory for creating loadConL* nodes for large/small constant pool.
 1340 
 1341 static inline jlong replicate_immF(float con) {
 1342   // Replicate float con 2 times and pack into vector.
 1343   int val = *((int*)&con);
 1344   jlong lval = val;
 1345   lval = (lval << 32) | (lval & 0xFFFFFFFFl);
 1346   return lval;
 1347 }
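// Example: replicate_immF(1.0f) returns 0x3f8000003f800000, i.e. the IEEE-754
// bit pattern of 1.0f (0x3f800000) packed into both halves of the jlong.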
 1348 
 1349 //=============================================================================
 1350 
 1351 const RegMask& MachConstantBaseNode::_out_RegMask = BITS64_CONSTANT_TABLE_BASE_mask();
 1352 int ConstantTable::calculate_table_base_offset() const {
 1353   return 0;  // absolute addressing, no offset
 1354 }
 1355 
 1356 bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
 1357 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1358   iRegPdstOper *op_dst = new iRegPdstOper();
 1359   MachNode *m1 = new loadToc_hiNode();
 1360   MachNode *m2 = new loadToc_loNode();
 1361 
 1362   m1->add_req(nullptr);
 1363   m2->add_req(nullptr, m1);
 1364   m1->_opnds[0] = op_dst;
 1365   m2->_opnds[0] = op_dst;
 1366   m2->_opnds[1] = op_dst;
 1367   ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 1368   ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 1369   nodes->push(m1);
 1370   nodes->push(m2);
 1371 }
 1372 
 1373 void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
 1374   // Is postalloc expanded.
 1375   ShouldNotReachHere();
 1376 }
 1377 
 1378 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1379   return 0;
 1380 }
 1381 
 1382 #ifndef PRODUCT
 1383 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1384   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1385 }
 1386 #endif
 1387 
 1388 //=============================================================================
 1389 
 1390 #ifndef PRODUCT
 1391 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1392   Compile* C = ra_->C;
 1393   const long framesize = C->output()->frame_slots() << LogBytesPerInt;
 1394 
 1395   st->print("PROLOG\n\t");
 1396   if (C->output()->need_stack_bang(framesize)) {
 1397     st->print("stack_overflow_check\n\t");
 1398   }
 1399 
 1400   if (!false /* TODO: PPC port C->is_frameless_method()*/) {
 1401     st->print("save return pc\n\t");
 1402     st->print("push frame %ld\n\t", -framesize);
 1403   }
 1404 
 1405   if (C->stub_function() == nullptr) {
 1406     st->print("nmethod entry barrier\n\t");
 1407   }
 1408 }
 1409 #endif
 1410 
 1411 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1412   Compile* C = ra_->C;
 1413 
 1414   const long framesize = C->output()->frame_size_in_bytes();
 1415   assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
 1416 
 1417   const bool method_is_frameless      = false /* TODO: PPC port C->is_frameless_method()*/;
 1418 
 1419   const Register return_pc            = R20; // Must match return_addr() in frame section.
 1420   const Register callers_sp           = R21;
 1421   const Register push_frame_temp      = R22;
 1422   const Register toc_temp             = R23;
 1423   assert_different_registers(R11, return_pc, callers_sp, push_frame_temp, toc_temp);
 1424 
 1425   if (method_is_frameless) {
 1426     // Add nop at beginning of all frameless methods to prevent any
 1427     // oop instructions from getting overwritten by make_not_entrant
 1428     // (patching attempt would fail).
 1429     __ nop();
 1430   } else {
 1431     // Get return pc.
 1432     __ mflr(return_pc);
 1433   }
 1434 
 1435   if (C->clinit_barrier_on_entry()) {
 1436     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1437 
 1438     Label L_skip_barrier;
 1439     Register klass = toc_temp;
 1440 
 1441     // Notify OOP recorder (don't need the relocation)
 1442     AddressLiteral md = __ constant_metadata_address(C->method()->holder()->constant_encoding());
 1443     __ load_const_optimized(klass, md.value(), R0);
 1444     __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
 1445 
 1446     __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
 1447     __ mtctr(klass);
 1448     __ bctr();
 1449 
 1450     __ bind(L_skip_barrier);
 1451   }
 1452 
 1453   // Calls to C2R adapters often do not accept exceptional returns.
 1454   // We require that their callers must bang for them. But be
 1455   // careful, because some VM calls (such as call site linkage) can
 1456   // use several kilobytes of stack. But the stack safety zone should
 1457   // account for that. See bugs 4446381, 4468289, 4497237.
 1458 
 1459   int bangsize = C->output()->bang_size_in_bytes();
 1460   assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
 1461   if (C->output()->need_stack_bang(bangsize)) {
 1462     // Unfortunately we cannot use the function provided in
 1463     // assembler.cpp as we have to emulate the pipes. So I had to
 1464     // insert the code of generate_stack_overflow_check(), see
 1465     // assembler.cpp for some illuminative comments.
 1466     const int page_size = os::vm_page_size();
 1467     int bang_end = StackOverflow::stack_shadow_zone_size();
 1468 
 1469     // This is how far the previous frame's stack banging extended.
 1470     const int bang_end_safe = bang_end;
 1471 
 1472     if (bangsize > page_size) {
 1473       bang_end += bangsize;
 1474     }
 1475 
 1476     int bang_offset = bang_end_safe;
 1477 
 1478     while (bang_offset <= bang_end) {
 1479       // Need at least one stack bang at end of shadow zone.
 1480 
      // Again, code is copied here, this time from assembler_ppc.cpp
      // (bang_stack_with_offset); see there for comments.
 1483 
 1484       // Stack grows down, caller passes positive offset.
 1485       assert(bang_offset > 0, "must bang with positive offset");
 1486 
 1487       long stdoffset = -bang_offset;
 1488 
 1489       if (Assembler::is_simm(stdoffset, 16)) {
 1490         // Signed 16 bit offset, a simple std is ok.
 1491         if (UseLoadInstructionsForStackBangingPPC64) {
 1492           __ ld(R0,  (int)(signed short)stdoffset, R1_SP);
 1493         } else {
 1494           __ std(R0, (int)(signed short)stdoffset, R1_SP);
 1495         }
 1496       } else if (Assembler::is_simm(stdoffset, 31)) {
 1497         // Use largeoffset calculations for addis & ld/std.
 1498         const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
 1499         const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
 1500 
 1501         Register tmp = R11;
 1502         __ addis(tmp, R1_SP, hi);
 1503         if (UseLoadInstructionsForStackBangingPPC64) {
 1504           __ ld(R0, lo, tmp);
 1505         } else {
 1506           __ std(R0, lo, tmp);
 1507         }
 1508       } else {
 1509         ShouldNotReachHere();
 1510       }
 1511 
 1512       bang_offset += page_size;
 1513     }
 1514     // R11 trashed
 1515   } // C->output()->need_stack_bang(framesize)
 1516 
 1517   unsigned int bytes = (unsigned int)framesize;
 1518   long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
 1519   ciMethod *currMethod = C->method();
 1520 
 1521   if (!method_is_frameless) {
    // Get caller's sp.
 1523     __ mr(callers_sp, R1_SP);
 1524 
 1525     // Push method's frame, modifies SP.
 1526     assert(Assembler::is_uimm(framesize, 32U), "wrong type");
 1527     // The ABI is already accounted for in 'framesize' via the
 1528     // 'out_preserve' area.
 1529     Register tmp = push_frame_temp;
    // Inlined code of push_frame((unsigned int)framesize, push_frame_temp).
 1531     if (Assembler::is_simm(-offset, 16)) {
 1532       __ stdu(R1_SP, -offset, R1_SP);
 1533     } else {
 1534       long x = -offset;
      // Inlined load_const(tmp, -offset).
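      // Build the 64-bit value x in tmp from four 16-bit pieces: lis/ori set
      // the high word, sldi shifts it into place, oris/ori add the low word.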
 1536       __ lis( tmp, (int)((signed short)(((x >> 32) & 0xffff0000) >> 16)));
 1537       __ ori( tmp, tmp, ((x >> 32) & 0x0000ffff));
 1538       __ sldi(tmp, tmp, 32);
 1539       __ oris(tmp, tmp, (x & 0xffff0000) >> 16);
 1540       __ ori( tmp, tmp, (x & 0x0000ffff));
 1541 
 1542       __ stdux(R1_SP, R1_SP, tmp);
 1543     }
 1544   }
 1545 #if 0 // TODO: PPC port
 1546   // For testing large constant pools, emit a lot of constants to constant pool.
 1547   // "Randomize" const_size.
 1548   if (ConstantsALot) {
 1549     const int num_consts = const_size();
 1550     for (int i = 0; i < num_consts; i++) {
 1551       __ long_constant(0xB0B5B00BBABE);
 1552     }
 1553   }
 1554 #endif
 1555   if (!method_is_frameless) {
 1556     // Save return pc.
 1557     __ std(return_pc, _abi0(lr), callers_sp);
 1558   }
 1559 
 1560   if (C->stub_function() == nullptr) {
 1561     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1562     bs->nmethod_entry_barrier(masm, push_frame_temp);
 1563   }
 1564 
 1565   C->output()->set_frame_complete(__ offset());
 1566 }
 1567 
 1568 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
 1570   return MachNode::size(ra_);
 1571 }
 1572 
 1573 int MachPrologNode::reloc() const {
 1574   // Return number of relocatable values contained in this instruction.
 1575   return 1; // 1 reloc entry for load_const(toc).
 1576 }
 1577 
 1578 //=============================================================================
 1579 
 1580 #ifndef PRODUCT
 1581 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1582   Compile* C = ra_->C;
 1583 
 1584   st->print("EPILOG\n\t");
 1585   st->print("restore return pc\n\t");
 1586   st->print("pop frame\n\t");
 1587 
 1588   if (do_polling() && C->is_method_compilation()) {
 1589     st->print("safepoint poll\n\t");
 1590   }
 1591 }
 1592 #endif
 1593 
 1594 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1595   Compile* C = ra_->C;
 1596 
 1597   const long framesize = ((long)C->output()->frame_slots()) << LogBytesPerInt;
 1598   assert(framesize >= 0, "negative frame-size?");
 1599 
 1600   const bool method_needs_polling = do_polling() && C->is_method_compilation();
 1601   const bool method_is_frameless  = false /* TODO: PPC port C->is_frameless_method()*/;
 1602   const Register return_pc        = R31;  // Must survive C-call to enable_stack_reserved_zone().
 1603   const Register temp             = R12;
 1604 
 1605   if (!method_is_frameless) {
    // Restore return pc relative to caller's sp.
 1607     __ ld(return_pc, ((int)framesize) + _abi0(lr), R1_SP);
 1608     // Move return pc to LR.
 1609     __ mtlr(return_pc);
 1610     // Pop frame (fixed frame-size).
 1611     __ addi(R1_SP, R1_SP, (int)framesize);
 1612   }
 1613 
 1614   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1615     __ reserved_stack_check(return_pc);
 1616   }
 1617 
 1618   if (method_needs_polling) {
 1619     Label dummy_label;
 1620     Label* code_stub = &dummy_label;
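    // With SIGTRAP-based polling the trap handler does the work; otherwise
    // register a C2SafepointPollStub as the out-of-line slow path of the
    // return poll.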
 1621     if (!UseSIGTRAP && !C->output()->in_scratch_emit_size()) {
 1622       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 1623       C->output()->add_stub(stub);
 1624       code_stub = &stub->entry();
 1625       __ relocate(relocInfo::poll_return_type);
 1626     }
 1627     __ safepoint_poll(*code_stub, temp, true /* at_return */, true /* in_nmethod */);
 1628   }
 1629 }
 1630 
 1631 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1632   // Variable size. Determine dynamically.
 1633   return MachNode::size(ra_);
 1634 }
 1635 
 1636 int MachEpilogNode::reloc() const {
 1637   // Return number of relocatable values contained in this instruction.
 1638   return 1; // 1 for load_from_polling_page.
 1639 }
 1640 
 1641 const Pipeline * MachEpilogNode::pipeline() const {
 1642   return MachNode::pipeline_class();
 1643 }
 1644 
//=============================================================================
 1646 
 1647 // Figure out which register class each belongs in: rc_int, rc_float, rc_vs or
 1648 // rc_stack.
 1649 enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };
 1650 
 1651 static enum RC rc_class(OptoReg::Name reg) {
 1652   // Return the register class for the given register. The given register
 1653   // reg is a <register>_num value, which is an index into the MachRegisterNumbers
 1654   // enumeration in adGlobals_ppc.hpp.
 1655 
 1656   if (reg == OptoReg::Bad) return rc_bad;
 1657 
 1658   // We have 64 integer register halves, starting at index 0.
 1659   if (reg < 64) return rc_int;
 1660 
 1661   // We have 64 floating-point register halves, starting at index 64.
 1662   if (reg < 64+64) return rc_float;
 1663 
 1664   // We have 64 vector-scalar registers, starting at index 128.
 1665   if (reg < 64+64+64) return rc_vs;
 1666 
 1667   // Between float regs & stack are the flags regs.
 1668   assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
 1669 
 1670   return rc_stack;
 1671 }
 1672 
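// Helper for spill copies: emit (or, in non-product builds, print) a single
// load or store between a register and the stack slot at 'offset' relative
// to R1_SP. Returns the size of the emitted instruction in bytes.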
 1673 static int ld_st_helper(C2_MacroAssembler *masm, const char *op_str, uint opcode, int reg, int offset,
 1674                         bool do_print, Compile* C, outputStream *st) {
 1675 
 1676   assert(opcode == Assembler::LD_OPCODE   ||
 1677          opcode == Assembler::STD_OPCODE  ||
 1678          opcode == Assembler::LWZ_OPCODE  ||
 1679          opcode == Assembler::STW_OPCODE  ||
 1680          opcode == Assembler::LFD_OPCODE  ||
 1681          opcode == Assembler::STFD_OPCODE ||
 1682          opcode == Assembler::LFS_OPCODE  ||
 1683          opcode == Assembler::STFS_OPCODE,
 1684          "opcode not supported");
 1685 
 1686   if (masm) {
 1687     int d =
 1688       (Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
 1689         Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
 1690       : Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
 1691     emit_long(masm, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
 1692   }
 1693 #ifndef PRODUCT
 1694   else if (do_print) {
 1695     st->print("%-7s %s, [R1_SP + #%d+%d] \t// spill copy",
 1696               op_str,
 1697               Matcher::regName[reg],
 1698               offset, 0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/);
 1699   }
 1700 #endif
 1701   return 4; // size
 1702 }
 1703 
 1704 uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1705   Compile* C = ra_->C;
 1706 
 1707   // Get registers to move.
 1708   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1709   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1710   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1711   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1712 
 1713   enum RC src_hi_rc = rc_class(src_hi);
 1714   enum RC src_lo_rc = rc_class(src_lo);
 1715   enum RC dst_hi_rc = rc_class(dst_hi);
 1716   enum RC dst_lo_rc = rc_class(dst_lo);
 1717 
 1718   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1719   if (src_hi != OptoReg::Bad)
 1720     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1721            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1722            "expected aligned-adjacent pairs");
 1723   // Generate spill code!
 1724   int size = 0;
 1725 
 1726   if (src_lo == dst_lo && src_hi == dst_hi)
 1727     return size;            // Self copy, no move.
 1728 
 1729   if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
 1730     // Memory->Memory Spill.
 1731     if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1732       int src_offset = ra_->reg2offset(src_lo);
 1733       int dst_offset = ra_->reg2offset(dst_lo);
 1734       if (masm) {
 1735         __ ld(R0, src_offset, R1_SP);
 1736         __ std(R0, dst_offset, R1_SP);
 1737         __ ld(R0, src_offset+8, R1_SP);
 1738         __ std(R0, dst_offset+8, R1_SP);
 1739       }
 1740       size += 16;
 1741     }
 1742     // VectorSRegister->Memory Spill.
 1743     else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
 1744       VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
 1745       int dst_offset = ra_->reg2offset(dst_lo);
 1746       if (masm) {
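        // stxvd2x/lxvd2x have no displacement form, so compute the effective
        // address of the stack slot in R0 first.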
 1747         __ addi(R0, R1_SP, dst_offset);
 1748         __ stxvd2x(Rsrc, R0);
 1749       }
 1750       size += 8;
 1751     }
 1752     // Memory->VectorSRegister Spill.
 1753     else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
 1754       VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
 1755       int src_offset = ra_->reg2offset(src_lo);
 1756       if (masm) {
 1757         __ addi(R0, R1_SP, src_offset);
 1758         __ lxvd2x(Rdst, R0);
 1759       }
 1760       size += 8;
 1761     }
 1762     // VectorSRegister->VectorSRegister.
 1763     else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
 1764       VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
 1765       VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
 1766       if (masm) {
 1767         __ xxlor(Rdst, Rsrc, Rsrc);
 1768       }
 1769       size += 4;
 1770     }
 1771     else {
 1772       ShouldNotReachHere(); // No VSR spill.
 1773     }
 1774     return size;
 1775   }
 1776 
 1777   // --------------------------------------
 1778   // Memory->Memory Spill. Use R0 to hold the value.
 1779   if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1780     int src_offset = ra_->reg2offset(src_lo);
 1781     int dst_offset = ra_->reg2offset(dst_lo);
 1782     if (src_hi != OptoReg::Bad) {
 1783       assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
 1784              "expected same type of move for high parts");
 1785       size += ld_st_helper(masm, "LD  ", Assembler::LD_OPCODE,  R0_num, src_offset, !do_size, C, st);
 1786       if (!masm && !do_size) st->print("\n\t");
 1787       size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
 1788     } else {
 1789       size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
 1790       if (!masm && !do_size) st->print("\n\t");
 1791       size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
 1792     }
 1793     return size;
 1794   }
 1795 
 1796   // --------------------------------------
 1797   // Check for float->int copy; requires a trip through memory.
 1798   if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
 1799     Unimplemented();
 1800   }
 1801 
 1802   // --------------------------------------
 1803   // Check for integer reg-reg copy.
 1804   if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
 1805       Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
 1806       Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
 1807       size = (Rsrc != Rdst) ? 4 : 0;
 1808 
 1809       if (masm) {
 1810         if (size) {
 1811           __ mr(Rdst, Rsrc);
 1812         }
 1813       }
 1814 #ifndef PRODUCT
 1815       else if (!do_size) {
 1816         if (size) {
 1817           st->print("%-7s %s, %s \t// spill copy", "MR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 1818         } else {
 1819           st->print("%-7s %s, %s \t// spill copy", "MR-NOP", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 1820         }
 1821       }
 1822 #endif
 1823       return size;
 1824   }
 1825 
 1826   // Check for integer store.
 1827   if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
 1828     int dst_offset = ra_->reg2offset(dst_lo);
 1829     if (src_hi != OptoReg::Bad) {
 1830       assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
 1831              "expected same type of move for high parts");
 1832       size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
 1833     } else {
 1834       size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
 1835     }
 1836     return size;
 1837   }
 1838 
 1839   // Check for integer load.
 1840   if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
 1841     int src_offset = ra_->reg2offset(src_lo);
 1842     if (src_hi != OptoReg::Bad) {
 1843       assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
 1844              "expected same type of move for high parts");
 1845       size += ld_st_helper(masm, "LD  ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
 1846     } else {
 1847       size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
 1848     }
 1849     return size;
 1850   }
 1851 
 1852   // Check for float reg-reg copy.
 1853   if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1854     if (masm) {
 1855       FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
 1856       FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
 1857       __ fmr(Rdst, Rsrc);
 1858     }
 1859 #ifndef PRODUCT
 1860     else if (!do_size) {
 1861       st->print("%-7s %s, %s \t// spill copy", "FMR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 1862     }
 1863 #endif
 1864     return 4;
 1865   }
 1866 
 1867   // Check for float store.
 1868   if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1869     int dst_offset = ra_->reg2offset(dst_lo);
 1870     if (src_hi != OptoReg::Bad) {
 1871       assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
 1872              "expected same type of move for high parts");
 1873       size += ld_st_helper(masm, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
 1874     } else {
 1875       size += ld_st_helper(masm, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
 1876     }
 1877     return size;
 1878   }
 1879 
 1880   // Check for float load.
 1881   if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
 1882     int src_offset = ra_->reg2offset(src_lo);
 1883     if (src_hi != OptoReg::Bad) {
 1884       assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
 1885              "expected same type of move for high parts");
 1886       size += ld_st_helper(masm, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
 1887     } else {
 1888       size += ld_st_helper(masm, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
 1889     }
 1890     return size;
 1891   }
 1892 
 1893   // --------------------------------------------------------------------
 1894   // Check for hi bits still needing moving. Only happens for misaligned
 1895   // arguments to native calls.
 1896   if (src_hi == dst_hi)
 1897     return size;               // Self copy; no move.
 1898 
 1899   assert(src_hi_rc != rc_bad && dst_hi_rc != rc_bad, "src_hi & dst_hi cannot be Bad");
 1900   ShouldNotReachHere(); // Unimplemented
 1901   return 0;
 1902 }
 1903 
 1904 #ifndef PRODUCT
 1905 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1906   if (!ra_)
 1907     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 1908   else
 1909     implementation(nullptr, ra_, false, st);
 1910 }
 1911 #endif
 1912 
 1913 void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1914   implementation(masm, ra_, false, nullptr);
 1915 }
 1916 
 1917 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
 1918   return implementation(nullptr, ra_, true, nullptr);
 1919 }
 1920 
 1921 #ifndef PRODUCT
 1922 void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1923   st->print("NOP \t// %d nops to pad for loops or prefixed instructions.", _count);
 1924 }
 1925 #endif
 1926 
 1927 void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *) const {
 1928   // _count contains the number of nops needed for padding.
 1929   for (int i = 0; i < _count; i++) {
 1930     __ nop();
 1931   }
 1932 }
 1933 
 1934 uint MachNopNode::size(PhaseRegAlloc *ra_) const {
 1935   return _count * 4;
 1936 }
 1937 
 1938 #ifndef PRODUCT
 1939 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1940   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1941   char reg_str[128];
 1942   ra_->dump_register(this, reg_str, sizeof(reg_str));
 1943   st->print("ADDI    %s, SP, %d \t// box node", reg_str, offset);
 1944 }
 1945 #endif
 1946 
 1947 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1948   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1949   int reg    = ra_->get_encode(this);
 1950 
 1951   if (Assembler::is_simm(offset, 16)) {
 1952     __ addi(as_Register(reg), R1, offset);
 1953   } else {
 1954     ShouldNotReachHere();
 1955   }
 1956 }
 1957 
 1958 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 1959   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 1960   return 4;
 1961 }
 1962 
 1963 #ifndef PRODUCT
 1964 void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1965   st->print_cr("---- MachUEPNode ----");
 1966   st->print_cr("...");
 1967 }
 1968 #endif
 1969 
 1970 void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1971   // This is the unverified entry point.
 1972   __ ic_check(CodeEntryAlignment);
 1973   // Argument is valid and klass is as expected, continue.
 1974 }
 1975 
 1976 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
 1977   // Variable size. Determine dynamically.
 1978   return MachNode::size(ra_);
 1979 }
 1980 
 1981 //=============================================================================
 1982 
 1983 %} // interrupt source
 1984 
 1985 source_hpp %{ // Header information of the source block.
 1986 
 1987 class HandlerImpl {
 1988 
 1989  public:
 1990 
 1991   static int emit_exception_handler(C2_MacroAssembler *masm);
 1992   static int emit_deopt_handler(C2_MacroAssembler* masm);
 1993 
 1994   static uint size_exception_handler() {
 1995     // The exception_handler is a b64_patchable.
 1996     return MacroAssembler::b64_patchable_size;
 1997   }
 1998 
 1999   static uint size_deopt_handler() {
 2000     // The deopt_handler is a bl64_patchable.
 2001     return MacroAssembler::bl64_patchable_size;
 2002   }
 2003 
 2004 };
 2005 
 2006 class Node::PD {
 2007 public:
 2008   enum NodeFlags {
 2009     _last_flag = Node::_last_flag
 2010   };
 2011 };
 2012 
 2013 %} // end source_hpp
 2014 
 2015 source %{
 2016 
 2017 int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
 2018   address base = __ start_a_stub(size_exception_handler());
 2019   if (base == nullptr) {
 2020     ciEnv::current()->record_failure("CodeCache is full");
 2021     return 0;  // CodeBuffer::expand failed
 2022   }
 2023 
 2024   int offset = __ offset();
 2025   __ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
 2026                        relocInfo::runtime_call_type);
 2027   assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
 2028   __ end_a_stub();
 2029 
 2030   return offset;
 2031 }
 2032 
 2033 // The deopt_handler is like the exception handler, but it calls to
 2034 // the deoptimization blob instead of jumping to the exception blob.
 2035 int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
 2036   address base = __ start_a_stub(size_deopt_handler());
 2037   if (base == nullptr) {
 2038     ciEnv::current()->record_failure("CodeCache is full");
 2039     return 0;  // CodeBuffer::expand failed
 2040   }
 2041 
 2042   int offset = __ offset();
 2043   __ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
 2044                         relocInfo::runtime_call_type);
 2045   assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
 2046   __ end_a_stub();
 2047 
 2048   return offset;
 2049 }
 2050 
 2051 //=============================================================================
 2052 
 2053 // Use a frame slots bias for frameless methods if accessing the stack.
 2054 static int frame_slots_bias(int reg_enc, PhaseRegAlloc* ra_) {
 2055   if (as_Register(reg_enc) == R1_SP) {
 2056     return 0; // TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes();
 2057   }
 2058   return 0;
 2059 }
 2060 
 2061 bool Matcher::match_rule_supported(int opcode) {
 2062   if (!has_match_rule(opcode)) {
 2063     return false; // no match rule present
 2064   }
 2065 
 2066   switch (opcode) {
 2067     case Op_SqrtD:
 2068       return VM_Version::has_fsqrt();
 2069     case Op_RoundDoubleMode:
 2070       return VM_Version::has_vsx();
 2071     case Op_CountLeadingZerosI:
 2072     case Op_CountLeadingZerosL:
 2073       return UseCountLeadingZerosInstructionsPPC64;
 2074     case Op_CountTrailingZerosI:
 2075     case Op_CountTrailingZerosL:
 2076       return (UseCountLeadingZerosInstructionsPPC64 || UseCountTrailingZerosInstructionsPPC64);
 2077     case Op_PopCountI:
 2078     case Op_PopCountL:
 2079       return (UsePopCountInstruction && VM_Version::has_popcntw());
 2080 
 2081     case Op_AddVB:
 2082     case Op_AddVS:
 2083     case Op_AddVI:
 2084     case Op_AddVF:
 2085     case Op_AddVD:
 2086     case Op_SubVB:
 2087     case Op_SubVS:
 2088     case Op_SubVI:
 2089     case Op_SubVF:
 2090     case Op_SubVD:
 2091     case Op_MulVS:
 2092     case Op_MulVF:
 2093     case Op_MulVD:
 2094     case Op_DivVF:
 2095     case Op_DivVD:
 2096     case Op_AbsVF:
 2097     case Op_AbsVD:
 2098     case Op_NegVF:
 2099     case Op_NegVD:
 2100     case Op_SqrtVF:
 2101     case Op_SqrtVD:
 2102     case Op_AddVL:
 2103     case Op_SubVL:
 2104     case Op_MulVI:
 2105     case Op_RoundDoubleModeV:
 2106       return SuperwordUseVSX;
 2107     case Op_PopCountVI:
 2108       return (SuperwordUseVSX && UsePopCountInstruction);
 2109     case Op_FmaF:
 2110     case Op_FmaD:
 2111       return UseFMA;
 2112     case Op_FmaVF:
 2113     case Op_FmaVD:
 2114       return (SuperwordUseVSX && UseFMA);
 2115 
 2116     case Op_Digit:
 2117       return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isDigit);
 2118     case Op_LowerCase:
 2119       return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isLowerCase);
 2120     case Op_UpperCase:
 2121       return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isUpperCase);
 2122     case Op_Whitespace:
 2123       return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isWhitespace);
 2124 
 2125     case Op_CacheWB:
 2126     case Op_CacheWBPreSync:
 2127     case Op_CacheWBPostSync:
 2128       return VM_Version::supports_data_cache_line_flush();
 2129   }
 2130 
  return true; // By default, match rules are supported.
 2132 }
 2133 
 2134 bool Matcher::match_rule_supported_auto_vectorization(int opcode, int vlen, BasicType bt) {
 2135   return match_rule_supported_vector(opcode, vlen, bt);
 2136 }
 2137 
 2138 bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 2139   if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
 2140     return false;
 2141   }
  return true; // By default, match rules are supported.
 2143 }
 2144 
 2145 bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
 2146   return false;
 2147 }
 2148 
 2149 bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
 2150   return false;
 2151 }
 2152 
 2153 const RegMask* Matcher::predicate_reg_mask(void) {
 2154   return nullptr;
 2155 }
 2156 
 2157 // Vector calling convention not yet implemented.
 2158 bool Matcher::supports_vector_calling_convention(void) {
 2159   return false;
 2160 }
 2161 
 2162 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 2163   Unimplemented();
 2164   return OptoRegPair(0, 0);
 2165 }
 2166 
 2167 // Vector width in bytes.
 2168 int Matcher::vector_width_in_bytes(BasicType bt) {
 2169   if (SuperwordUseVSX) {
 2170     assert(MaxVectorSize == 16, "");
 2171     return 16;
 2172   } else {
 2173     assert(MaxVectorSize == 8, "");
 2174     return 8;
 2175   }
 2176 }
 2177 
 2178 // Vector ideal reg.
 2179 uint Matcher::vector_ideal_reg(int size) {
 2180   if (SuperwordUseVSX) {
 2181     assert(MaxVectorSize == 16 && size == 16, "");
 2182     return Op_VecX;
 2183   } else {
 2184     assert(MaxVectorSize == 8 && size == 8, "");
 2185     return Op_RegL;
 2186   }
 2187 }
 2188 
 2189 // Limits on vector size (number of elements) loaded into vector.
 2190 int Matcher::max_vector_size(const BasicType bt) {
 2191   assert(is_java_primitive(bt), "only primitive type vectors");
 2192   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 2193 }
 2194 
 2195 int Matcher::min_vector_size(const BasicType bt) {
 2196   return max_vector_size(bt); // Same as max.
 2197 }
 2198 
 2199 int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
 2200   return Matcher::max_vector_size(bt);
 2201 }
 2202 
 2203 int Matcher::scalable_vector_reg_size(const BasicType bt) {
 2204   return -1;
 2205 }
 2206 
 2207 // RETURNS: whether this branch offset is short enough that a short
 2208 // branch can be used.
 2209 //
 2210 // If the platform does not provide any short branch variants, then
 2211 // this method should return `false' for offset 0.
 2212 //
 2213 // `Compile::Fill_buffer' will decide on basis of this information
 2214 // whether to do the pass `Compile::Shorten_branches' at all.
 2215 //
 2216 // And `Compile::Shorten_branches' will decide on basis of this
 2217 // information whether to replace particular branch sites by short
 2218 // ones.
 2219 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2220   // Is the offset within the range of a ppc64 pc relative branch?
 2221   bool b;
 2222 
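  // A PPC conditional branch encodes its displacement in instruction bits
  // 16..29 plus two implied zero bits, i.e. a 16-bit signed byte offset
  // (29 - 16 + 1 + 2 == 16). The safety zone keeps the decision conservative
  // while branch sites may still move during Shorten_branches.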
 2223   const int safety_zone = 3 * BytesPerInstWord;
 2224   b = Assembler::is_simm((offset<0 ? offset-safety_zone : offset+safety_zone),
 2225                          29 - 16 + 1 + 2);
 2226   return b;
 2227 }
 2228 
 2229 /* TODO: PPC port
 2230 // Make a new machine dependent decode node (with its operands).
 2231 MachTypeNode *Matcher::make_decode_node() {
 2232   assert(CompressedOops::base() == nullptr && CompressedOops::shift() == 0,
 2233          "This method is only implemented for unscaled cOops mode so far");
 2234   MachTypeNode *decode = new decodeN_unscaledNode();
 2235   decode->set_opnd_array(0, new iRegPdstOper());
 2236   decode->set_opnd_array(1, new iRegNsrcOper());
 2237   return decode;
 2238 }
 2239 */
 2240 
 2241 MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
 2242   ShouldNotReachHere(); // generic vector operands not supported
 2243   return nullptr;
 2244 }
 2245 
 2246 bool Matcher::is_reg2reg_move(MachNode* m) {
 2247   ShouldNotReachHere();  // generic vector operands not supported
 2248   return false;
 2249 }
 2250 
 2251 bool Matcher::is_generic_vector(MachOper* opnd)  {
 2252   ShouldNotReachHere();  // generic vector operands not supported
 2253   return false;
 2254 }
 2255 
 2256 // Constants for c2c and c calling conventions.
 2257 
 2258 const MachRegisterNumbers iarg_reg[8] = {
 2259   R3_num, R4_num, R5_num, R6_num,
 2260   R7_num, R8_num, R9_num, R10_num
 2261 };
 2262 
 2263 const MachRegisterNumbers farg_reg[13] = {
 2264   F1_num, F2_num, F3_num, F4_num,
 2265   F5_num, F6_num, F7_num, F8_num,
 2266   F9_num, F10_num, F11_num, F12_num,
 2267   F13_num
 2268 };
 2269 
 2270 const MachRegisterNumbers vsarg_reg[64] = {
 2271   VSR0_num, VSR1_num, VSR2_num, VSR3_num,
 2272   VSR4_num, VSR5_num, VSR6_num, VSR7_num,
 2273   VSR8_num, VSR9_num, VSR10_num, VSR11_num,
 2274   VSR12_num, VSR13_num, VSR14_num, VSR15_num,
 2275   VSR16_num, VSR17_num, VSR18_num, VSR19_num,
 2276   VSR20_num, VSR21_num, VSR22_num, VSR23_num,
  VSR24_num, VSR25_num, VSR26_num, VSR27_num,
 2278   VSR28_num, VSR29_num, VSR30_num, VSR31_num,
 2279   VSR32_num, VSR33_num, VSR34_num, VSR35_num,
 2280   VSR36_num, VSR37_num, VSR38_num, VSR39_num,
 2281   VSR40_num, VSR41_num, VSR42_num, VSR43_num,
 2282   VSR44_num, VSR45_num, VSR46_num, VSR47_num,
 2283   VSR48_num, VSR49_num, VSR50_num, VSR51_num,
 2284   VSR52_num, VSR53_num, VSR54_num, VSR55_num,
 2285   VSR56_num, VSR57_num, VSR58_num, VSR59_num,
 2286   VSR60_num, VSR61_num, VSR62_num, VSR63_num
 2287 };
 2288 
 2289 const int num_iarg_registers = sizeof(iarg_reg) / sizeof(iarg_reg[0]);
 2290 
 2291 const int num_farg_registers = sizeof(farg_reg) / sizeof(farg_reg[0]);
 2292 
 2293 const int num_vsarg_registers = sizeof(vsarg_reg) / sizeof(vsarg_reg[0]);
 2294 
 2295 // Return whether or not this register is ever used as an argument. This
 2296 // function is used on startup to build the trampoline stubs in generateOptoStub.
 2297 // Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
 2299 bool Matcher::can_be_java_arg(int reg) {
 2300   // We return true for all registers contained in iarg_reg[] and
 2301   // farg_reg[] and their virtual halves.
 2302   // We must include the virtual halves in order to get STDs and LDs
 2303   // instead of STWs and LWs in the trampoline stubs.
 2304 
 2305   if (   reg == R3_num  || reg == R3_H_num
 2306       || reg == R4_num  || reg == R4_H_num
 2307       || reg == R5_num  || reg == R5_H_num
 2308       || reg == R6_num  || reg == R6_H_num
 2309       || reg == R7_num  || reg == R7_H_num
 2310       || reg == R8_num  || reg == R8_H_num
 2311       || reg == R9_num  || reg == R9_H_num
 2312       || reg == R10_num || reg == R10_H_num)
 2313     return true;
 2314 
 2315   if (   reg == F1_num  || reg == F1_H_num
 2316       || reg == F2_num  || reg == F2_H_num
 2317       || reg == F3_num  || reg == F3_H_num
 2318       || reg == F4_num  || reg == F4_H_num
 2319       || reg == F5_num  || reg == F5_H_num
 2320       || reg == F6_num  || reg == F6_H_num
 2321       || reg == F7_num  || reg == F7_H_num
 2322       || reg == F8_num  || reg == F8_H_num
 2323       || reg == F9_num  || reg == F9_H_num
 2324       || reg == F10_num || reg == F10_H_num
 2325       || reg == F11_num || reg == F11_H_num
 2326       || reg == F12_num || reg == F12_H_num
 2327       || reg == F13_num || reg == F13_H_num)
 2328     return true;
 2329 
 2330   return false;
 2331 }
 2332 
 2333 bool Matcher::is_spillable_arg(int reg) {
 2334   return can_be_java_arg(reg);
 2335 }
 2336 
 2337 uint Matcher::int_pressure_limit()
 2338 {
 2339   return (INTPRESSURE == -1) ? 26 : INTPRESSURE;
 2340 }
 2341 
 2342 uint Matcher::float_pressure_limit()
 2343 {
 2344   return (FLOATPRESSURE == -1) ? 28 : FLOATPRESSURE;
 2345 }
 2346 
 2347 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2348   return false;
 2349 }
 2350 
 2351 // Register for DIVI projection of divmodI.
 2352 RegMask Matcher::divI_proj_mask() {
 2353   ShouldNotReachHere();
 2354   return RegMask();
 2355 }
 2356 
 2357 // Register for MODI projection of divmodI.
 2358 RegMask Matcher::modI_proj_mask() {
 2359   ShouldNotReachHere();
 2360   return RegMask();
 2361 }
 2362 
 2363 // Register for DIVL projection of divmodL.
 2364 RegMask Matcher::divL_proj_mask() {
 2365   ShouldNotReachHere();
 2366   return RegMask();
 2367 }
 2368 
 2369 // Register for MODL projection of divmodL.
 2370 RegMask Matcher::modL_proj_mask() {
 2371   ShouldNotReachHere();
 2372   return RegMask();
 2373 }
 2374 
 2375 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
 2376   return RegMask();
 2377 }
 2378 
 2379 %}
 2380 
 2381 //----------ENCODING BLOCK-----------------------------------------------------
 2382 // This block specifies the encoding classes used by the compiler to output
 2383 // byte streams. Encoding classes are parameterized macros used by
 2384 // Machine Instruction Nodes in order to generate the bit encoding of the
 2385 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
// REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER. REG_INTER causes an
 2388 // operand to generate a function which returns its register number when
 2389 // queried. CONST_INTER causes an operand to generate a function which
 2390 // returns the value of the constant when queried. MEMORY_INTER causes an
 2391 // operand to generate four functions which return the Base Register, the
 2392 // Index Register, the Scale Value, and the Offset Value of the operand when
 2393 // queried. COND_INTER causes an operand to generate six functions which
 2394 // return the encoding code (ie - encoding bits for the instruction)
 2395 // associated with each basic boolean condition for a conditional instruction.
 2396 //
 2397 // Instructions specify two basic values for encoding. Again, a function
 2398 // is available to check if the constant displacement is an oop. They use the
 2399 // ins_encode keyword to specify their encoding classes (which must be
 2400 // a sequence of enc_class names, and their parameters, specified in
 2401 // the encoding block), and they use the
 2402 // opcode keyword to specify, in order, their primary, secondary, and
 2403 // tertiary opcode. Only the opcode sections which a particular instruction
 2404 // needs for encoding need to be specified.
 2405 encode %{
 2406   enc_class enc_unimplemented %{
 2407     __ unimplemented("Unimplemented mach node encoding in AD file.", 13);
 2408   %}
 2409 
 2410   enc_class enc_untested %{
 2411 #ifdef ASSERT
 2412     __ untested("Untested mach node encoding in AD file.");
#endif
 2415   %}
 2416 
 2417   enc_class enc_lbz(iRegIdst dst, memory mem) %{
 2418     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2419     __ lbz($dst$$Register, Idisp, $mem$$base$$Register);
 2420   %}
 2421 
 2422   // Load acquire.
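  // The twi_0/isync sequence following the load is the PPC load-acquire
  // idiom: the trap instruction consumes the loaded value and isync keeps
  // subsequent accesses from being performed before the load completes.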
 2423   enc_class enc_lbz_ac(iRegIdst dst, memory mem) %{
 2424     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2425     __ lbz($dst$$Register, Idisp, $mem$$base$$Register);
 2426     __ twi_0($dst$$Register);
 2427     __ isync();
 2428   %}
 2429 
 2430   enc_class enc_lhz(iRegIdst dst, memory mem) %{
 2431     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2432     __ lhz($dst$$Register, Idisp, $mem$$base$$Register);
 2433   %}
 2434 
 2435   // Load acquire.
 2436   enc_class enc_lhz_ac(iRegIdst dst, memory mem) %{
 2437     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2438     __ lhz($dst$$Register, Idisp, $mem$$base$$Register);
 2439     __ twi_0($dst$$Register);
 2440     __ isync();
 2441   %}
 2442 
 2443   enc_class enc_lwz(iRegIdst dst, memory mem) %{
 2444     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2445     __ lwz($dst$$Register, Idisp, $mem$$base$$Register);
 2446   %}
 2447 
 2448   // Load acquire.
 2449   enc_class enc_lwz_ac(iRegIdst dst, memory mem) %{
 2450     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2451     __ lwz($dst$$Register, Idisp, $mem$$base$$Register);
 2452     __ twi_0($dst$$Register);
 2453     __ isync();
 2454   %}
 2455 
 2456   enc_class enc_ld(iRegLdst dst, memoryAlg4 mem) %{
 2457     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2458     // Operand 'ds' requires 4-alignment.
 2459     assert((Idisp & 0x3) == 0, "unaligned offset");
 2460     __ ld($dst$$Register, Idisp, $mem$$base$$Register);
 2461   %}
 2462 
 2463   // Load acquire.
 2464   enc_class enc_ld_ac(iRegLdst dst, memoryAlg4 mem) %{
 2465     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2466     // Operand 'ds' requires 4-alignment.
 2467     assert((Idisp & 0x3) == 0, "unaligned offset");
 2468     __ ld($dst$$Register, Idisp, $mem$$base$$Register);
 2469     __ twi_0($dst$$Register);
 2470     __ isync();
 2471   %}
 2472 
 2473   enc_class enc_lfd(RegF dst, memory mem) %{
 2474     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2475     __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
 2476   %}
 2477 
 2478   enc_class enc_load_long_constL(iRegLdst dst, immL src, iRegLdst toc) %{
 2479     int toc_offset = 0;
 2480 
 2481     address const_toc_addr;
 2482     // Create a non-oop constant, no relocation needed.
 2483     // If it is an IC, it has a virtual_call_Relocation.
 2484     const_toc_addr = __ long_constant((jlong)$src$$constant);
 2485     if (const_toc_addr == nullptr) {
 2486       ciEnv::current()->record_out_of_memory_failure();
 2487       return;
 2488     }
 2489 
 2490     // Get the constant's TOC offset.
 2491     toc_offset = __ offset_to_method_toc(const_toc_addr);
 2492 
 2493     // Keep the current instruction offset in mind.
 2494     ((loadConLNode*)this)->_cbuf_insts_offset = __ offset();
 2495 
 2496     __ ld($dst$$Register, toc_offset, $toc$$Register);
 2497   %}
 2498 
 2499   enc_class enc_load_long_constL_hi(iRegLdst dst, iRegLdst toc, immL src) %{
 2500     if (!ra_->C->output()->in_scratch_emit_size()) {
 2501       address const_toc_addr;
 2502       // Create a non-oop constant, no relocation needed.
 2503       // If it is an IC, it has a virtual_call_Relocation.
 2504       const_toc_addr = __ long_constant((jlong)$src$$constant);
 2505       if (const_toc_addr == nullptr) {
 2506         ciEnv::current()->record_out_of_memory_failure();
 2507         return;
 2508       }
 2509 
 2510       // Get the constant's TOC offset.
 2511       const int toc_offset = __ offset_to_method_toc(const_toc_addr);
 2512       // Store the toc offset of the constant.
 2513       ((loadConL_hiNode*)this)->_const_toc_offset = toc_offset;
 2514 
 2515       // Also keep the current instruction offset in mind.
 2516       ((loadConL_hiNode*)this)->_cbuf_insts_offset = __ offset();
 2517     }
 2518 
 2519     __ addis($dst$$Register, $toc$$Register, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
 2520   %}
 2521 
 2522 %} // encode
 2523 
 2524 source %{
 2525 
 2526 typedef struct {
 2527   loadConL_hiNode *_large_hi;
 2528   loadConL_loNode *_large_lo;
 2529   loadConLNode    *_small;
 2530   MachNode        *_last;
 2531 } loadConLNodesTuple;
 2532 
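// Postalloc-expand helper: create the machine nodes that load a long constant
// from the method's TOC. With a large constant pool this becomes an addis (hi)
// plus TOC-relative ld (lo) pair; otherwise a single TOC-relative ld suffices.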
 2533 loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
 2534                                              OptoReg::Name reg_second, OptoReg::Name reg_first) {
 2535   loadConLNodesTuple nodes;
 2536 
 2537   const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
 2538   if (large_constant_pool) {
 2539     // Create new nodes.
 2540     loadConL_hiNode *m1 = new loadConL_hiNode();
 2541     loadConL_loNode *m2 = new loadConL_loNode();
 2542 
 2543     // inputs for new nodes
 2544     m1->add_req(nullptr, toc);
 2545     m2->add_req(nullptr, m1);
 2546 
 2547     // operands for new nodes
 2548     m1->_opnds[0] = new iRegLdstOper(); // dst
 2549     m1->_opnds[1] = immSrc;             // src
 2550     m1->_opnds[2] = new iRegPdstOper(); // toc
 2551     m2->_opnds[0] = new iRegLdstOper(); // dst
 2552     m2->_opnds[1] = immSrc;             // src
 2553     m2->_opnds[2] = new iRegLdstOper(); // base
 2554 
 2555     // Initialize ins_attrib TOC fields.
 2556     m1->_const_toc_offset = -1;
 2557     m2->_const_toc_offset_hi_node = m1;
 2558 
 2559     // Initialize ins_attrib instruction offset.
 2560     m1->_cbuf_insts_offset = -1;
 2561 
 2562     // register allocation for new nodes
 2563     ra_->set_pair(m1->_idx, reg_second, reg_first);
 2564     ra_->set_pair(m2->_idx, reg_second, reg_first);
 2565 
 2566     // Create result.
 2567     nodes._large_hi = m1;
 2568     nodes._large_lo = m2;
 2569     nodes._small = nullptr;
 2570     nodes._last = nodes._large_lo;
 2571     assert(m2->bottom_type()->isa_long(), "must be long");
 2572   } else {
 2573     loadConLNode *m2 = new loadConLNode();
 2574 
 2575     // inputs for new nodes
 2576     m2->add_req(nullptr, toc);
 2577 
 2578     // operands for new nodes
 2579     m2->_opnds[0] = new iRegLdstOper(); // dst
 2580     m2->_opnds[1] = immSrc;             // src
 2581     m2->_opnds[2] = new iRegPdstOper(); // toc
 2582 
 2583     // Initialize ins_attrib instruction offset.
 2584     m2->_cbuf_insts_offset = -1;
 2585 
 2586     // register allocation for new nodes
 2587     ra_->set_pair(m2->_idx, reg_second, reg_first);
 2588 
 2589     // Create result.
 2590     nodes._large_hi = nullptr;
 2591     nodes._large_lo = nullptr;
 2592     nodes._small = m2;
 2593     nodes._last = nodes._small;
 2594     assert(m2->bottom_type()->isa_long(), "must be long");
 2595   }
 2596 
 2597   return nodes;
 2598 }
 2599 
 2600 typedef struct {
 2601   loadConL_hiNode *_large_hi;
 2602   loadConL_loNode *_large_lo;
 2603   mtvsrdNode      *_moved;
 2604   xxspltdNode     *_replicated;
 2605   loadConLNode    *_small;
 2606   MachNode        *_last;
 2607 } loadConLReplicatedNodesTuple;
 2608 
 2609 loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
 2610                                                  vecXOper *dst, immI_0Oper *zero,
 2611                                                  OptoReg::Name reg_second, OptoReg::Name reg_first,
 2612                                                  OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) {
 2613   loadConLReplicatedNodesTuple nodes;
 2614 
 2615   const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
 2616   if (large_constant_pool) {
 2617     // Create new nodes.
 2618     loadConL_hiNode *m1 = new  loadConL_hiNode();
 2619     loadConL_loNode *m2 = new  loadConL_loNode();
 2620     mtvsrdNode *m3 = new  mtvsrdNode();
 2621     xxspltdNode *m4 = new  xxspltdNode();
 2622 
 2623     // inputs for new nodes
 2624     m1->add_req(nullptr, toc);
 2625     m2->add_req(nullptr, m1);
 2626     m3->add_req(nullptr, m2);
 2627     m4->add_req(nullptr, m3);
 2628 
 2629     // operands for new nodes
 2630     m1->_opnds[0] = new  iRegLdstOper(); // dst
 2631     m1->_opnds[1] = immSrc;              // src
 2632     m1->_opnds[2] = new  iRegPdstOper(); // toc
 2633 
 2634     m2->_opnds[0] = new  iRegLdstOper(); // dst
 2635     m2->_opnds[1] = immSrc;              // src
 2636     m2->_opnds[2] = new  iRegLdstOper(); // base
 2637 
 2638     m3->_opnds[0] = new  vecXOper();     // dst
 2639     m3->_opnds[1] = new  iRegLdstOper(); // src
 2640 
 2641     m4->_opnds[0] = new  vecXOper();     // dst
 2642     m4->_opnds[1] = new  vecXOper();     // src
 2643     m4->_opnds[2] = zero;
 2644 
 2645     // Initialize ins_attrib TOC fields.
 2646     m1->_const_toc_offset = -1;
 2647     m2->_const_toc_offset_hi_node = m1;
 2648 
 2649     // Initialize ins_attrib instruction offset.
 2650     m1->_cbuf_insts_offset = -1;
 2651 
 2652     // register allocation for new nodes
 2653     ra_->set_pair(m1->_idx, reg_second, reg_first);
 2654     ra_->set_pair(m2->_idx, reg_second, reg_first);
 2655     ra_->set1(m3->_idx, reg_second);
 2656     ra_->set2(m3->_idx, reg_vec_first);
 2657     ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
 2658 
 2659     // Create result.
 2660     nodes._large_hi = m1;
 2661     nodes._large_lo = m2;
 2662     nodes._moved = m3;
 2663     nodes._replicated = m4;
 2664     nodes._small = nullptr;
 2665     nodes._last = nodes._replicated;
 2666     assert(m2->bottom_type()->isa_long(), "must be long");
 2667   } else {
 2668     loadConLNode *m2 = new  loadConLNode();
 2669     mtvsrdNode *m3 = new  mtvsrdNode();
 2670     xxspltdNode *m4 = new  xxspltdNode();
 2671 
 2672     // inputs for new nodes
 2673     m2->add_req(nullptr, toc);
 2674 
 2675     // operands for new nodes
 2676     m2->_opnds[0] = new  iRegLdstOper(); // dst
 2677     m2->_opnds[1] = immSrc;              // src
 2678     m2->_opnds[2] = new  iRegPdstOper(); // toc
 2679 
 2680     m3->_opnds[0] = new  vecXOper();     // dst
 2681     m3->_opnds[1] = new  iRegLdstOper(); // src
 2682 
 2683     m4->_opnds[0] = new  vecXOper();     // dst
 2684     m4->_opnds[1] = new  vecXOper();     // src
 2685     m4->_opnds[2] = zero;
 2686 
 2687     // Initialize ins_attrib instruction offset.
 2688     m2->_cbuf_insts_offset = -1;
 2689     ra_->set1(m3->_idx, reg_second);
 2690     ra_->set2(m3->_idx, reg_vec_first);
 2691     ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
 2692 
 2693     // register allocation for new nodes
 2694     ra_->set_pair(m2->_idx, reg_second, reg_first);
 2695 
 2696     // Create result.
 2697     nodes._large_hi = nullptr;
 2698     nodes._large_lo = nullptr;
 2699     nodes._small = m2;
 2700     nodes._moved = m3;
 2701     nodes._replicated = m4;
 2702     nodes._last = nodes._replicated;
 2703     assert(m2->bottom_type()->isa_long(), "must be long");
 2704   }
 2705 
 2706   return nodes;
 2707 }
 2708 
 2709 %} // source
 2710 
 2711 encode %{
 2712   // Postalloc expand emitter for loading a long constant from the method's TOC.
  // Enc_class needed as constanttablebase is not supported by postalloc
 2714   // expand.
 2715   enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
 2716     // Create new nodes.
 2717     loadConLNodesTuple loadConLNodes =
 2718       loadConLNodesTuple_create(ra_, n_toc, op_src,
 2719                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 2720 
 2721     // Push new nodes.
 2722     if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
 2723     if (loadConLNodes._last)     nodes->push(loadConLNodes._last);
 2724 
 2725     // some asserts
 2726     assert(nodes->length() >= 1, "must have created at least 1 node");
 2727     assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
 2728   %}
 2729 
 2730   enc_class enc_load_long_constP(iRegLdst dst, immP src, iRegLdst toc) %{
 2731     int toc_offset = 0;
 2732 
 2733     intptr_t val = $src$$constant;
 2734     relocInfo::relocType constant_reloc = $src->constant_reloc();  // src
 2735     address const_toc_addr;
 2736     RelocationHolder r; // Initializes type to none.
 2737     if (constant_reloc == relocInfo::oop_type) {
 2738       // Create an oop constant and a corresponding relocation.
 2739       AddressLiteral a = __ constant_oop_address((jobject)val);
 2740       const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
 2741       r = a.rspec();
 2742     } else if (constant_reloc == relocInfo::metadata_type) {
 2743       // Notify OOP recorder (don't need the relocation)
 2744       AddressLiteral a = __ constant_metadata_address((Metadata *)val);
 2745       const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
 2746     } else {
 2747       // Create a non-oop constant, no relocation needed.
 2748       const_toc_addr = __ long_constant((jlong)$src$$constant);
 2749     }
 2750 
 2751     if (const_toc_addr == nullptr) {
 2752       ciEnv::current()->record_out_of_memory_failure();
 2753       return;
 2754     }
 2755     __ relocate(r); // If set above.
 2756     // Get the constant's TOC offset.
 2757     toc_offset = __ offset_to_method_toc(const_toc_addr);
 2758 
 2759     __ ld($dst$$Register, toc_offset, $toc$$Register);
 2760   %}
 2761 
 2762   enc_class enc_load_long_constP_hi(iRegLdst dst, immP src, iRegLdst toc) %{
 2763     if (!ra_->C->output()->in_scratch_emit_size()) {
 2764       intptr_t val = $src$$constant;
 2765       relocInfo::relocType constant_reloc = $src->constant_reloc();  // src
 2766       address const_toc_addr;
 2767       RelocationHolder r; // Initializes type to none.
 2768       if (constant_reloc == relocInfo::oop_type) {
 2769         // Create an oop constant and a corresponding relocation.
 2770         AddressLiteral a = __ constant_oop_address((jobject)val);
 2771         const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
 2772         r = a.rspec();
 2773       } else if (constant_reloc == relocInfo::metadata_type) {
 2774         // Notify OOP recorder (don't need the relocation)
 2775         AddressLiteral a = __ constant_metadata_address((Metadata *)val);
 2776         const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
 2777       } else {  // non-oop pointers, e.g. card mark base, heap top
 2778         // Create a non-oop constant, no relocation needed.
 2779         const_toc_addr = __ long_constant((jlong)$src$$constant);
 2780       }
 2781 
 2782       if (const_toc_addr == nullptr) {
 2783         ciEnv::current()->record_out_of_memory_failure();
 2784         return;
 2785       }
 2786       __ relocate(r); // If set above.
 2787       // Get the constant's TOC offset.
 2788       const int toc_offset = __ offset_to_method_toc(const_toc_addr);
 2789       // Store the toc offset of the constant.
 2790       ((loadConP_hiNode*)this)->_const_toc_offset = toc_offset;
 2791     }
 2792 
 2793     __ addis($dst$$Register, $toc$$Register, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
 2794   %}
 2795 
 2796   // Postalloc expand emitter for loading a ptr constant from the method's TOC.
  // Enc_class needed as constanttablebase is not supported by postalloc
 2798   // expand.
 2799   enc_class postalloc_expand_load_ptr_constant(iRegPdst dst, immP src, iRegLdst toc) %{
 2800     const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
 2801     if (large_constant_pool) {
 2802       // Create new nodes.
 2803       loadConP_hiNode *m1 = new loadConP_hiNode();
 2804       loadConP_loNode *m2 = new loadConP_loNode();
 2805 
 2806       // inputs for new nodes
 2807       m1->add_req(nullptr, n_toc);
 2808       m2->add_req(nullptr, m1);
 2809 
 2810       // operands for new nodes
 2811       m1->_opnds[0] = new iRegPdstOper(); // dst
 2812       m1->_opnds[1] = op_src;             // src
 2813       m1->_opnds[2] = new iRegPdstOper(); // toc
 2814       m2->_opnds[0] = new iRegPdstOper(); // dst
 2815       m2->_opnds[1] = op_src;             // src
 2816       m2->_opnds[2] = new iRegLdstOper(); // base
 2817 
 2818       // Initialize ins_attrib TOC fields.
 2819       m1->_const_toc_offset = -1;
 2820       m2->_const_toc_offset_hi_node = m1;
 2821 
 2822       // Register allocation for new nodes.
 2823       ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2824       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2825 
 2826       nodes->push(m1);
 2827       nodes->push(m2);
 2828       assert(m2->bottom_type()->isa_ptr(), "must be ptr");
 2829     } else {
 2830       loadConPNode *m2 = new loadConPNode();
 2831 
 2832       // inputs for new nodes
 2833       m2->add_req(nullptr, n_toc);
 2834 
 2835       // operands for new nodes
 2836       m2->_opnds[0] = new iRegPdstOper(); // dst
 2837       m2->_opnds[1] = op_src;             // src
 2838       m2->_opnds[2] = new iRegPdstOper(); // toc
 2839 
 2840       // Register allocation for new nodes.
 2841       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2842 
 2843       nodes->push(m2);
 2844       assert(m2->bottom_type()->isa_ptr(), "must be ptr");
 2845     }
 2846   %}
 2847 
  // Enc_class needed as constanttablebase is not supported by postalloc
 2849   // expand.
 2850   enc_class postalloc_expand_load_float_constant(regF dst, immF src, iRegLdst toc) %{
 2851     bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
 2852 
 2853     MachNode *m2;
 2854     if (large_constant_pool) {
 2855       m2 = new loadConFCompNode();
 2856     } else {
 2857       m2 = new loadConFNode();
 2858     }
 2859     // inputs for new nodes
 2860     m2->add_req(nullptr, n_toc);
 2861 
 2862     // operands for new nodes
 2863     m2->_opnds[0] = op_dst;
 2864     m2->_opnds[1] = op_src;
 2865     m2->_opnds[2] = new iRegPdstOper(); // constanttablebase
 2866 
 2867     // register allocation for new nodes
 2868     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2869     nodes->push(m2);
 2870   %}
 2871 
  // Enc_class needed as constanttablebase is not supported by postalloc
 2873   // expand.
 2874   enc_class postalloc_expand_load_double_constant(regD dst, immD src, iRegLdst toc) %{
 2875     bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
 2876 
 2877     MachNode *m2;
 2878     if (large_constant_pool) {
 2879       m2 = new loadConDCompNode();
 2880     } else {
 2881       m2 = new loadConDNode();
 2882     }
 2883     // inputs for new nodes
 2884     m2->add_req(nullptr, n_toc);
 2885 
 2886     // operands for new nodes
 2887     m2->_opnds[0] = op_dst;
 2888     m2->_opnds[1] = op_src;
 2889     m2->_opnds[2] = new iRegPdstOper(); // constanttablebase
 2890 
 2891     // register allocation for new nodes
 2892     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2893     nodes->push(m2);
 2894   %}
 2895 
 2896   enc_class enc_stw(iRegIsrc src, memory mem) %{
 2897     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2898     __ stw($src$$Register, Idisp, $mem$$base$$Register);
 2899   %}
 2900 
 2901   enc_class enc_std(iRegIsrc src, memoryAlg4 mem) %{
 2902     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2903     // Operand 'ds' requires 4-alignment.
 2904     assert((Idisp & 0x3) == 0, "unaligned offset");
 2905     __ std($src$$Register, Idisp, $mem$$base$$Register);
 2906   %}
 2907 
 2908   enc_class enc_stfs(RegF src, memory mem) %{
 2909     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2910     __ stfs($src$$FloatRegister, Idisp, $mem$$base$$Register);
 2911   %}
 2912 
 2913   enc_class enc_stfd(RegF src, memory mem) %{
 2914     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 2915     __ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
 2916   %}
 2917 
 2918   enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
 2919 
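          // Rough sketch of the isel variant created below (schematic only;
          // 'heapbase' and 'shift' stand for the CompressedOops base register and
          // shift, and the mnemonics are illustrative, not emitted verbatim):
          //   cmpdi crx, src, 0          // is the oop null?
          //   subf  dst, heapbase, src   // dst = src - base
          //   srdi  dst, dst, shift      // compress
          //   dst = ('eq' in crx) ? 0 : dst   // via isel; keep 0 for a null oop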
 2920     if (VM_Version::has_isel()) {
 2921       // Use the isel instruction on Power 7 and newer.
 2922       cmpP_reg_imm16Node *n_compare  = new cmpP_reg_imm16Node();
 2923       encodeP_subNode    *n_sub_base = new encodeP_subNode();
 2924       encodeP_shiftNode  *n_shift    = new encodeP_shiftNode();
 2925       cond_set_0_oopNode *n_cond_set = new cond_set_0_oopNode();
 2926 
 2927       n_compare->add_req(n_region, n_src);
 2928       n_compare->_opnds[0] = op_crx;
 2929       n_compare->_opnds[1] = op_src;
 2930       n_compare->_opnds[2] = new immL16Oper(0);
 2931 
 2932       n_sub_base->add_req(n_region, n_src);
 2933       n_sub_base->_opnds[0] = op_dst;
 2934       n_sub_base->_opnds[1] = op_src;
 2935       n_sub_base->_bottom_type = _bottom_type;
 2936 
 2937       n_shift->add_req(n_region, n_sub_base);
 2938       n_shift->_opnds[0] = op_dst;
 2939       n_shift->_opnds[1] = op_dst;
 2940       n_shift->_bottom_type = _bottom_type;
 2941 
 2942       n_cond_set->add_req(n_region, n_compare, n_shift);
 2943       n_cond_set->_opnds[0] = op_dst;
 2944       n_cond_set->_opnds[1] = op_crx;
 2945       n_cond_set->_opnds[2] = op_dst;
 2946       n_cond_set->_bottom_type = _bottom_type;
 2947 
 2948       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
 2949       ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2950       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2951       ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2952 
 2953       nodes->push(n_compare);
 2954       nodes->push(n_sub_base);
 2955       nodes->push(n_shift);
 2956       nodes->push(n_cond_set);
 2957 
 2958     } else {
 2959       // before Power 7
 2960       moveRegNode        *n_move     = new moveRegNode();
 2961       cmpP_reg_imm16Node *n_compare  = new cmpP_reg_imm16Node();
 2962       encodeP_shiftNode  *n_shift    = new encodeP_shiftNode();
 2963       cond_sub_baseNode  *n_sub_base = new cond_sub_baseNode();
 2964 
 2965       n_move->add_req(n_region, n_src);
 2966       n_move->_opnds[0] = op_dst;
 2967       n_move->_opnds[1] = op_src;
 2968       ra_->set_oop(n_move, true); // Until here, 'n_move' still produces an oop.
 2969 
 2970       n_compare->add_req(n_region, n_src);
 2971       n_compare->add_prec(n_move);
 2972 
 2973       n_compare->_opnds[0] = op_crx;
 2974       n_compare->_opnds[1] = op_src;
 2975       n_compare->_opnds[2] = new immL16Oper(0);
 2976 
 2977       n_sub_base->add_req(n_region, n_compare, n_src);
 2978       n_sub_base->_opnds[0] = op_dst;
 2979       n_sub_base->_opnds[1] = op_crx;
 2980       n_sub_base->_opnds[2] = op_src;
 2981       n_sub_base->_bottom_type = _bottom_type;
 2982 
 2983       n_shift->add_req(n_region, n_sub_base);
 2984       n_shift->_opnds[0] = op_dst;
 2985       n_shift->_opnds[1] = op_dst;
 2986       n_shift->_bottom_type = _bottom_type;
 2987 
 2988       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2989       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
 2990       ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2991       ra_->set_pair(n_move->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 2992 
 2993       nodes->push(n_move);
 2994       nodes->push(n_compare);
 2995       nodes->push(n_sub_base);
 2996       nodes->push(n_shift);
 2997     }
 2998 
 2999     assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
 3000   %}
 3001 
 3002   enc_class postalloc_expand_encode_oop_not_null(iRegNdst dst, iRegPdst src) %{
 3003 
 3004     encodeP_subNode *n1 = new encodeP_subNode();
 3005     n1->add_req(n_region, n_src);
 3006     n1->_opnds[0] = op_dst;
 3007     n1->_opnds[1] = op_src;
 3008     n1->_bottom_type = _bottom_type;
 3009 
 3010     encodeP_shiftNode *n2 = new encodeP_shiftNode();
 3011     n2->add_req(n_region, n1);
 3012     n2->_opnds[0] = op_dst;
 3013     n2->_opnds[1] = op_dst;
 3014     n2->_bottom_type = _bottom_type;
 3015     ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3016     ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3017 
 3018     nodes->push(n1);
 3019     nodes->push(n2);
 3020     assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
 3021   %}
 3022 
 3023   enc_class postalloc_expand_decode_oop(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
 3024     decodeN_shiftNode *n_shift    = new decodeN_shiftNode();
 3025     cmpN_reg_imm0Node *n_compare  = new cmpN_reg_imm0Node();
 3026 
 3027     n_compare->add_req(n_region, n_src);
 3028     n_compare->_opnds[0] = op_crx;
 3029     n_compare->_opnds[1] = op_src;
 3030     n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);
 3031 
 3032     n_shift->add_req(n_region, n_src);
 3033     n_shift->_opnds[0] = op_dst;
 3034     n_shift->_opnds[1] = op_src;
 3035     n_shift->_bottom_type = _bottom_type;
 3036 
 3037     if (VM_Version::has_isel()) {
 3038       // Use the isel instruction on Power 7 and newer.
 3039 
 3040       decodeN_addNode *n_add_base = new decodeN_addNode();
 3041       n_add_base->add_req(n_region, n_shift);
 3042       n_add_base->_opnds[0] = op_dst;
 3043       n_add_base->_opnds[1] = op_dst;
 3044       n_add_base->_bottom_type = _bottom_type;
 3045 
 3046       cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode();
 3047       n_cond_set->add_req(n_region, n_compare, n_add_base);
 3048       n_cond_set->_opnds[0] = op_dst;
 3049       n_cond_set->_opnds[1] = op_crx;
 3050       n_cond_set->_opnds[2] = op_dst;
 3051       n_cond_set->_bottom_type = _bottom_type;
 3052 
 3053       assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
 3054       ra_->set_oop(n_cond_set, true);
 3055 
 3056       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3057       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
 3058       ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3059       ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3060 
 3061       nodes->push(n_compare);
 3062       nodes->push(n_shift);
 3063       nodes->push(n_add_base);
 3064       nodes->push(n_cond_set);
 3065 
 3066     } else {
 3067       // before Power 7
 3068       cond_add_baseNode *n_add_base = new cond_add_baseNode();
 3069 
 3070       n_add_base->add_req(n_region, n_compare, n_shift);
 3071       n_add_base->_opnds[0] = op_dst;
 3072       n_add_base->_opnds[1] = op_crx;
 3073       n_add_base->_opnds[2] = op_dst;
 3074       n_add_base->_bottom_type = _bottom_type;
 3075 
 3076       assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
 3077       ra_->set_oop(n_add_base, true);
 3078 
 3079       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3080       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
 3081       ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3082 
 3083       nodes->push(n_compare);
 3084       nodes->push(n_shift);
 3085       nodes->push(n_add_base);
 3086     }
 3087   %}
 3088 
 3089   enc_class postalloc_expand_decode_oop_not_null(iRegPdst dst, iRegNsrc src) %{
 3090     decodeN_shiftNode *n1 = new decodeN_shiftNode();
 3091     n1->add_req(n_region, n_src);
 3092     n1->_opnds[0] = op_dst;
 3093     n1->_opnds[1] = op_src;
 3094     n1->_bottom_type = _bottom_type;
 3095 
 3096     decodeN_addNode *n2 = new decodeN_addNode();
 3097     n2->add_req(n_region, n1);
 3098     n2->_opnds[0] = op_dst;
 3099     n2->_opnds[1] = op_dst;
 3100     n2->_bottom_type = _bottom_type;
 3101     ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3102     ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 3103 
 3104     assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
 3105     ra_->set_oop(n2, true);
 3106 
 3107     nodes->push(n1);
 3108     nodes->push(n2);
 3109   %}
 3110 
 3111   enc_class enc_cmove_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src, cmpOp cmp) %{
 3112     int cc        = $cmp$$cmpcode;
 3113     int flags_reg = $crx$$reg;
 3114     Label done;
 3115     assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
 3116     // Branch if not (cmp crx).
 3117     __ bc(cc_to_inverse_boint(cc), cc_to_biint(cc, flags_reg), done);
 3118     __ mr($dst$$Register, $src$$Register);
 3119     __ bind(done);
 3120   %}
 3121 
 3122   enc_class enc_cmove_imm(iRegIdst dst, flagsRegSrc crx, immI16 src, cmpOp cmp) %{
 3123     Label done;
 3124     assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
 3125     // Branch if not (cmp crx).
 3126     __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
 3127     __ li($dst$$Register, $src$$constant);
 3128     __ bind(done);
 3129   %}
 3130 
 3131   // This enc_class is needed so that the scheduler gets proper
 3132   // input mapping for latency computation.
 3133   enc_class enc_andc(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 3134     __ andc($dst$$Register, $src1$$Register, $src2$$Register);
 3135   %}
 3136 
 3137   enc_class enc_convI2B_regI__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
 3138     Label done;
 3139     __ cmpwi($crx$$CondRegister, $src$$Register, 0);
 3140     __ li($dst$$Register, $zero$$constant);
 3141     __ beq($crx$$CondRegister, done);
 3142     __ li($dst$$Register, $notzero$$constant);
 3143     __ bind(done);
 3144   %}
 3145 
 3146   enc_class enc_convP2B_regP__cmove(iRegIdst dst, iRegPsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
 3147     Label done;
 3148     __ cmpdi($crx$$CondRegister, $src$$Register, 0);
 3149     __ li($dst$$Register, $zero$$constant);
 3150     __ beq($crx$$CondRegister, done);
 3151     __ li($dst$$Register, $notzero$$constant);
 3152     __ bind(done);
 3153   %}
 3154 
 3155   enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL mem ) %{
 3156     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 3157     Label done;
 3158     __ bso($crx$$CondRegister, done);
 3159     __ ld($dst$$Register, Idisp, $mem$$base$$Register);
 3160     __ bind(done);
 3161   %}
 3162 
 3163   enc_class enc_cmove_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
 3164     Label done;
 3165     __ bso($crx$$CondRegister, done);
 3166     __ mffprd($dst$$Register, $src$$FloatRegister);
 3167     __ bind(done);
 3168   %}
 3169 
 3170   enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
 3171     Label d;   // dummy
 3172     __ bind(d);
 3173     Label* p = ($lbl$$label);
 3174     // `p' is `nullptr' when this encoding class is used only to
 3175     // determine the size of the encoded instruction.
 3176     Label& l = (nullptr == p)? d : *(p);
 3177     int cc = $cmp$$cmpcode;
 3178     int flags_reg = $crx$$reg;
 3179     assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
 3180     int bhint = Assembler::bhintNoHint;
 3181 
 3182     if (UseStaticBranchPredictionForUncommonPathsPPC64) {
 3183       if (_prob <= PROB_NEVER) {
 3184         bhint = Assembler::bhintIsNotTaken;
 3185       } else if (_prob >= PROB_ALWAYS) {
 3186         bhint = Assembler::bhintIsTaken;
 3187       }
 3188     }
 3189 
 3190     __ bc(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
 3191           cc_to_biint(cc, flags_reg),
 3192           l);
 3193   %}
 3194 
 3195   enc_class enc_bc_far(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
 3196     // The scheduler doesn't know about branch shortening, so we set the opcode
 3197     // to ppc64Opcode_bc in order to hide this detail from the scheduler.
 3198     Label d;    // dummy
 3199     __ bind(d);
 3200     Label* p = ($lbl$$label);
 3201     // `p' is `nullptr' when this encoding class is used only to
 3202     // determine the size of the encoded instruction.
 3203     Label& l = (nullptr == p)? d : *(p);
 3204     int cc = $cmp$$cmpcode;
 3205     int flags_reg = $crx$$reg;
 3206     int bhint = Assembler::bhintNoHint;
 3207 
 3208     if (UseStaticBranchPredictionForUncommonPathsPPC64) {
 3209       if (_prob <= PROB_NEVER) {
 3210         bhint = Assembler::bhintIsNotTaken;
 3211       } else if (_prob >= PROB_ALWAYS) {
 3212         bhint = Assembler::bhintIsTaken;
 3213       }
 3214     }
 3215 
 3216     // Tell the conditional far branch to optimize itself when being relocated.
 3217     __ bc_far(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
 3218                   cc_to_biint(cc, flags_reg),
 3219                   l,
 3220                   MacroAssembler::bc_far_optimize_on_relocate);
 3221   %}
 3222 
 3223   // Postalloc expand emitter for loading a replicated float constant from
 3224   // the method's TOC.
 3225   // Enc_class needed as constanttablebase is not supported by postalloc
 3226   // expand.
 3227   enc_class postalloc_expand_load_replF_constant(iRegLdst dst, immF src, iRegLdst toc) %{
 3228     // Create new nodes.
 3229 
 3230     // Make an operand with the bit pattern to load as float.
 3231     immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
 3232 
 3233     loadConLNodesTuple loadConLNodes =
 3234       loadConLNodesTuple_create(ra_, n_toc, op_repl,
 3235                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 3236 
 3237     // Push new nodes.
 3238     if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
 3239     if (loadConLNodes._last)     nodes->push(loadConLNodes._last);
 3240 
 3241     assert(nodes->length() >= 1, "must have created at least 1 node");
 3242     assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
 3243   %}
 3244 
 3245   enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc, iRegLdst tmp) %{
 3246     // Create new nodes.
 3247 
 3248     // Make an operand with the bit pattern to load as float.
 3249     immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
 3250     immI_0Oper *op_zero = new immI_0Oper(0);
 3251 
 3252     loadConLReplicatedNodesTuple loadConLNodes =
 3253       loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
 3254                                 ra_->get_reg_second(n_tmp), ra_->get_reg_first(n_tmp),
 3255                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 3256 
 3257     // Push new nodes.
 3258     if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
 3259     if (loadConLNodes._large_lo) { nodes->push(loadConLNodes._large_lo); }
 3260     if (loadConLNodes._moved)    { nodes->push(loadConLNodes._moved); }
 3261     if (loadConLNodes._last)     { nodes->push(loadConLNodes._last); }
 3262 
 3263     assert(nodes->length() >= 1, "must have created at least 1 node");
 3264   %}
 3265 
 3266   // This enc_class is needed so that the scheduler gets proper
 3267   // input mapping for latency computation.
 3268   enc_class enc_poll(immI dst, iRegLdst poll) %{
 3269     // Fake operand dst needed for PPC scheduler.
 3270     assert($dst$$constant == 0x0, "dst must be 0x0");
 3271 
 3272     // Mark the code position where the load from the safepoint
 3273     // polling page was emitted as relocInfo::poll_type.
 3274     __ relocate(relocInfo::poll_type);
 3275     __ load_from_polling_page($poll$$Register);
 3276   %}
 3277 
 3278   // A Java static call or a runtime call.
 3279   //
 3280   // Branch-and-link relative to a trampoline.
 3281   // The trampoline loads the target address and does a long branch to there.
 3282   // In case we call Java, the trampoline branches to an interpreter_stub
 3283   // which loads the inline cache and the real call target from the constant pool.
 3284   //
 3285   // This basically looks like this:
 3286   //
 3287   // >>>> consts      -+  -+
 3288   //                   |   |- offset1
 3289   // [call target1]    | <-+
 3290   // [IC cache]        |- offset2
 3291   // [call target2] <--+
 3292   //
 3293   // <<<< consts
 3294   // >>>> insts
 3295   //
 3296   // bl offset16               -+  -+             ??? // How many bits available?
 3297   //                            |   |
 3298   // <<<< insts                 |   |
 3299   // >>>> stubs                 |   |
 3300   //                            |   |- trampoline_stub_Reloc
 3301   // trampoline stub:           | <-+
 3302   //   r2 = toc                 |
 3303   //   r2 = [r2 + offset1]      |       // Load call target1 from const section
 3304   //   mtctr r2                 |
 3305   //   bctr                     |- static_stub_Reloc
 3306   // comp_to_interp_stub:   <---+
 3307   //   r1 = toc
 3308   //   ICreg = [r1 + IC_offset]         // Load IC from const section
 3309   //   r1    = [r1 + offset2]           // Load call target2 from const section
 3310   //   mtctr r1
 3311   //   bctr
 3312   //
 3313   // <<<< stubs
 3314   //
 3315   // The call instruction in the code either
 3316   // - Branches directly to a compiled method if the offset is encodable in the instruction.
 3317   // - Branches to the trampoline stub if the offset to the compiled method is not encodable.
 3318   // - Branches to the compiled_to_interp stub if the target is interpreted.
 3319   //
 3320   // Further there are three relocations from the loads to the constants in
 3321   // the constant section.
 3322   //
 3323   // The use of r1 and r2 in the stubs makes it possible to distinguish them.
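        // Note: bl is an I-form branch with a 24-bit word offset, so it reaches
        // targets within +/-32 MiB of the call site.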
 3324   enc_class enc_java_static_call(method meth) %{
 3325     address entry_point = (address)$meth$$method;
 3326 
 3327     if (!_method) {
 3328       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
 3329       emit_call_with_trampoline_stub(masm, entry_point, relocInfo::runtime_call_type);
 3330       if (ciEnv::current()->failing()) { return; } // Code cache may be full.
 3331     } else {
 3332       // Remember the offset, not the address.
 3333       const int start_offset = __ offset();
 3334 
 3335       // The trampoline stub.
 3336       // No entry point given, use the current pc.
 3337       // Make sure the branch fits into the bl instruction's offset range.
 3338       if (entry_point == 0) entry_point = __ pc();
 3339 
 3340       // Put the entry point as a constant into the constant pool.
 3341       const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
 3342       if (entry_point_toc_addr == nullptr) {
 3343         ciEnv::current()->record_out_of_memory_failure();
 3344         return;
 3345       }
 3346       const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 3347 
 3348       // Emit the trampoline stub which will be related to the branch-and-link below.
 3349       CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, start_offset);
 3350       if (ciEnv::current()->failing()) { return; } // Code cache may be full.
 3351       int method_index = resolved_method_index(masm);
 3352       __ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
 3353                   : static_call_Relocation::spec(method_index));
 3354 
 3355       // The real call.
 3356       // Note: At this point we do not have the address of the trampoline
 3357       // stub, and the entry point might be too far away for bl, so __ pc()
 3358       // serves as dummy and the bl will be patched later.
 3359       __ set_inst_mark();
 3360       __ bl(__ pc());  // Emits a relocation.
 3361 
 3362       // The stub for call to interpreter.
 3363       address stub = CompiledDirectCall::emit_to_interp_stub(masm);
 3364       __ clear_inst_mark();
 3365       if (stub == nullptr) {
 3366         ciEnv::current()->record_failure("CodeCache is full");
 3367         return;
 3368       }
 3369     }
 3370     __ post_call_nop();
 3371   %}
 3372 
 3373   // Second node of expanded dynamic call - the call.
 3374   enc_class enc_java_dynamic_call_sched(method meth) %{
 3375     if (!ra_->C->output()->in_scratch_emit_size()) {
 3376       // Create a call trampoline stub for the given method.
 3377       const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
 3378       const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
 3379       if (entry_point_const == nullptr) {
 3380         ciEnv::current()->record_out_of_memory_failure();
 3381         return;
 3382       }
 3383       const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
 3384       CallStubImpl::emit_trampoline_stub(masm, entry_point_const_toc_offset, __ offset());
 3385       if (ra_->C->env()->failing()) { return; } // Code cache may be full.
 3386 
 3387       // Build relocation at call site with ic position as data.
 3388       assert((_load_ic_hi_node != nullptr && _load_ic_node == nullptr) ||
 3389              (_load_ic_hi_node == nullptr && _load_ic_node != nullptr),
 3390              "must have one, but can't have both");
 3391       assert((_load_ic_hi_node != nullptr && _load_ic_hi_node->_cbuf_insts_offset != -1) ||
 3392              (_load_ic_node != nullptr    && _load_ic_node->_cbuf_insts_offset != -1),
 3393              "must contain instruction offset");
 3394       const int virtual_call_oop_addr_offset = _load_ic_hi_node != nullptr
 3395         ? _load_ic_hi_node->_cbuf_insts_offset
 3396         : _load_ic_node->_cbuf_insts_offset;
 3397       const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
 3398       assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
 3399              "should be load from TOC");
 3400       int method_index = resolved_method_index(masm);
 3401       __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
 3402     }
 3403 
 3404     // At this point we do not have the address of the trampoline stub, and
 3405     // the entry point might be too far away for bl, so __ pc() serves as a
 3406     // dummy and the bl will be patched later.
 3407     __ bl((address) __ pc());
 3408     __ post_call_nop();
 3409   %}
 3410 
 3411   // postalloc expand emitter for virtual calls.
 3412   enc_class postalloc_expand_java_dynamic_call_sched(method meth, iRegLdst toc) %{
 3413 
 3414     // Create the nodes for loading the IC from the TOC.
 3415     loadConLNodesTuple loadConLNodes_IC =
 3416       loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) Universe::non_oop_word()),
 3417                                 OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
 3418 
 3419     // Create the call node.
 3420     CallDynamicJavaDirectSchedNode *call = new CallDynamicJavaDirectSchedNode();
 3421     call->_method_handle_invoke = _method_handle_invoke;
 3422     call->_vtable_index      = _vtable_index;
 3423     call->_method            = _method;
 3424     call->_optimized_virtual = _optimized_virtual;
 3425     call->_tf                = _tf;
 3426     call->_entry_point       = _entry_point;
 3427     call->_cnt               = _cnt;
 3428     call->_guaranteed_safepoint = true;
 3429     call->_oop_map           = _oop_map;
 3430     call->_jvms              = _jvms;
 3431     call->_jvmadj            = _jvmadj;
 3432     call->_has_ea_local_in_scope = _has_ea_local_in_scope;
 3433     call->_in_rms            = _in_rms;
 3434     call->_nesting           = _nesting;
 3435     call->_override_symbolic_info = _override_symbolic_info;
 3436     call->_arg_escape        = _arg_escape;
 3437 
 3438     // New call needs all inputs of old call.
 3439     // Req...
 3440     for (uint i = 0; i < req(); ++i) {
 3441       // The expanded node does not need the toc input any more.
 3442       // Add the inline cache constant here instead. This expresses that the
 3443       // register of the inline cache must be live at the call.
 3444       // Otherwise we would have to adapt the JVMState by -1.
 3445       if (i == mach_constant_base_node_input()) {
 3446         call->add_req(loadConLNodes_IC._last);
 3447       } else {
 3448         call->add_req(in(i));
 3449       }
 3450     }
 3451     // ...as well as prec
 3452     for (uint i = req(); i < len(); ++i) {
 3453       call->add_prec(in(i));
 3454     }
 3455 
 3456     // Remember nodes loading the inline cache into r19.
 3457     call->_load_ic_hi_node = loadConLNodes_IC._large_hi;
 3458     call->_load_ic_node    = loadConLNodes_IC._small;
 3459 
 3460     // Operands for new nodes.
 3461     call->_opnds[0] = _opnds[0];
 3462     call->_opnds[1] = _opnds[1];
 3463 
 3464     // Only the inline cache is associated with a register.
 3465     assert(Matcher::inline_cache_reg() == OptoReg::Name(R19_num), "ic reg should be R19");
 3466 
 3467     // Push new nodes.
 3468     if (loadConLNodes_IC._large_hi) nodes->push(loadConLNodes_IC._large_hi);
 3469     if (loadConLNodes_IC._last)     nodes->push(loadConLNodes_IC._last);
 3470     nodes->push(call);
 3471   %}
 3472 
 3473   // Compound version of the dynamic call.
 3474   // Toc is only passed so that it can be used in the ins_encode statement.
 3475   // In the code we have to use $constanttablebase.
 3476   enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
 3477     int start_offset = __ offset();
 3478 
 3479     Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
 3480 
 3481     int vtable_index = this->_vtable_index;
 3482     if (vtable_index < 0) {
 3483       // Must be invalid_vtable_index, not nonvirtual_vtable_index.
 3484       assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
 3485       Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
 3486 
 3487       // Virtual call relocation will point to ic load.
 3488       address virtual_call_meta_addr = __ pc();
 3489       // Load a clear inline cache.
 3490       AddressLiteral empty_ic((address) Universe::non_oop_word());
 3491       bool success = __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc, /*fixed_size*/ true);
 3492       if (!success) {
 3493         ciEnv::current()->record_out_of_memory_failure();
 3494         return;
 3495       }
 3496       // CALL to fixup routine.  Fixup routine uses ScopeDesc info
 3497       // to determine who we intended to call.
 3498       __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
 3499       emit_call_with_trampoline_stub(masm, (address)$meth$$method, relocInfo::none);
 3500       if (ciEnv::current()->failing()) { return; } // Code cache may be full.
 3501       assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
 3502              "Fix constant in ret_addr_offset(), expected %d", __ offset() - start_offset);
 3503     } else {
 3504       assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
 3505       // Go through the vtable. Get the receiver klass; the receiver has already
 3506       // been checked for non-null. If we go through a C2I adapter, the
 3507       // interpreter expects the method in R19_method.
 3508 
 3509       __ load_klass(R11_scratch1, R3);
 3510 
 3511       int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
 3512       int v_off = entry_offset + in_bytes(vtableEntry::method_offset());
 3513       __ li(R19_method, v_off);
 3514       __ ldx(R19_method/*method*/, R19_method/*method offset*/, R11_scratch1/*class*/);
 3515       // NOTE: for vtable dispatches, the vtable entry will never be
 3516       // null. However it may very well end up in handle_wrong_method
 3517       // if the method is abstract for the particular class.
 3518       __ ld(R11_scratch1, in_bytes(Method::from_compiled_offset()), R19_method);
 3519       // Call target. Either compiled code or C2I adapter.
 3520       __ mtctr(R11_scratch1);
 3521       __ bctrl();
 3522       assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
 3523              "Fix constant in ret_addr_offset(), expected %d", __ offset() - start_offset);
 3524     }
 3525     __ post_call_nop();
 3526   %}
 3527 
 3528   // a runtime call
 3529   enc_class enc_java_to_runtime_call (method meth) %{
 3530     const address start_pc = __ pc();
 3531 
 3532 #if defined(ABI_ELFv2)
 3533     address entry = !($meth$$method) ? nullptr : (address)$meth$$method;
 3534     __ call_c(entry, relocInfo::runtime_call_type);
 3535     __ post_call_nop();
 3536 #else
 3537     // The function we're going to call.
 3538     FunctionDescriptor fdtemp;
 3539     const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;
 3540 
 3541     Register Rtoc = R12_scratch2;
 3542     // Calculate the method's TOC.
 3543     __ calculate_address_from_global_toc(Rtoc, __ method_toc());
 3544     // Put entry, env and toc into the constant pool; this needs up to 3 constant
 3545     // pool entries. call_c_using_toc will optimize the call.
 3546     bool success = __ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
 3547     if (!success) {
 3548       ciEnv::current()->record_out_of_memory_failure();
 3549       return;
 3550     }
 3551     __ post_call_nop();
 3552 #endif
 3553 
 3554     // Check the ret_addr_offset.
 3555     assert(((MachCallRuntimeNode*)this)->ret_addr_offset() ==  __ last_calls_return_pc() - start_pc,
 3556            "Fix constant in ret_addr_offset()");
 3557   %}
 3558 
 3559   // Move to ctr for leaf call.
 3560   // This enc_class is needed so that the scheduler gets proper
 3561   // input mapping for latency computation.
 3562   enc_class enc_leaf_call_mtctr(iRegLsrc src) %{
 3563     __ mtctr($src$$Register);
 3564   %}
 3565 
 3566   // Postalloc expand emitter for runtime leaf calls.
 3567   enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{
 3568     loadConLNodesTuple loadConLNodes_Entry;
 3569 #if defined(ABI_ELFv2)
 3570     jlong entry_address = (jlong) this->entry_point();
 3571     assert(entry_address, "need address here");
 3572     loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
 3573                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 3574 #else
 3575     // Get the struct that describes the function we are about to call.
 3576     FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
 3577     assert(fd, "need fd here");
 3578     jlong entry_address = (jlong) fd->entry();
 3579     // new nodes
 3580     loadConLNodesTuple loadConLNodes_Env;
 3581     loadConLNodesTuple loadConLNodes_Toc;
 3582 
 3583     // Create nodes and operands for loading the entry point.
 3584     loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
 3585                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 3586 
 3587 
 3588     // Create nodes and operands for loading the env pointer.
 3589     if (fd->env() != nullptr) {
 3590       loadConLNodes_Env = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->env()),
 3591                                                     OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
 3592     } else {
 3593       loadConLNodes_Env._large_hi = nullptr;
 3594       loadConLNodes_Env._large_lo = nullptr;
 3595       loadConLNodes_Env._small    = nullptr;
 3596       loadConLNodes_Env._last = new loadConL16Node();
 3597       loadConLNodes_Env._last->_opnds[0] = new iRegLdstOper();
 3598       loadConLNodes_Env._last->_opnds[1] = new immL16Oper(0);
 3599       ra_->set_pair(loadConLNodes_Env._last->_idx, OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
 3600     }
 3601 
 3602     // Create nodes and operands for loading the TOC pointer.
 3603     loadConLNodes_Toc = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->toc()),
 3604                                                   OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
 3605 #endif // ABI_ELFv2
 3606     // mtctr node
 3607     MachNode *mtctr = new CallLeafDirect_mtctrNode();
 3608 
 3609     assert(loadConLNodes_Entry._last != nullptr, "entry must exist");
 3610     mtctr->add_req(0, loadConLNodes_Entry._last);
 3611 
 3612     mtctr->_opnds[0] = new iRegLdstOper();
 3613     mtctr->_opnds[1] = new iRegLdstOper();
 3614 
 3615     // call node
 3616     MachCallLeafNode *call = new CallLeafDirectNode();
 3617 
 3618     call->_opnds[0] = _opnds[0];
 3619     call->_opnds[1] = new methodOper((intptr_t) entry_address); // May get set later.
 3620 
 3621     // Make the new call node look like the old one.
 3622     call->_name        = _name;
 3623     call->_tf          = _tf;
 3624     call->_entry_point = _entry_point;
 3625     call->_cnt         = _cnt;
 3626     call->_guaranteed_safepoint = false;
 3627     call->_oop_map     = _oop_map;
 3628     guarantee(!_jvms, "You must clone the jvms and adapt the offsets by fix_jvms().");
 3629     call->_jvms        = nullptr;
 3630     call->_jvmadj      = _jvmadj;
 3631     call->_in_rms      = _in_rms;
 3632     call->_nesting     = _nesting;
 3633 
 3634     // New call needs all inputs of old call.
 3635     // Req...
 3636     for (uint i = 0; i < req(); ++i) {
 3637       if (i != mach_constant_base_node_input()) {
 3638         call->add_req(in(i));
 3639       }
 3640     }
 3641 
 3642     // These must be required edges, as the registers are live up to
 3643     // the call. Otherwise the constants are handled as kills.
 3644     call->add_req(mtctr);
 3645 #if !defined(ABI_ELFv2)
 3646     call->add_req(loadConLNodes_Env._last);
 3647     call->add_req(loadConLNodes_Toc._last);
 3648 #endif
 3649 
 3650     // ...as well as prec
 3651     for (uint i = req(); i < len(); ++i) {
 3652       call->add_prec(in(i));
 3653     }
 3654 
 3655     // registers
 3656     ra_->set1(mtctr->_idx, OptoReg::Name(SR_CTR_num));
 3657 
 3658     // Insert the new nodes.
 3659     if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
 3660     if (loadConLNodes_Entry._last)     nodes->push(loadConLNodes_Entry._last);
 3661 #if !defined(ABI_ELFv2)
 3662     if (loadConLNodes_Env._large_hi)   nodes->push(loadConLNodes_Env._large_hi);
 3663     if (loadConLNodes_Env._last)       nodes->push(loadConLNodes_Env._last);
 3664     if (loadConLNodes_Toc._large_hi)   nodes->push(loadConLNodes_Toc._large_hi);
 3665     if (loadConLNodes_Toc._last)       nodes->push(loadConLNodes_Toc._last);
 3666 #endif
 3667     nodes->push(mtctr);
 3668     nodes->push(call);
 3669   %}
 3670 %}
 3671 
 3672 //----------FRAME--------------------------------------------------------------
 3673 // Definition of frame structure and management information.
 3674 
 3675 frame %{
 3676   // These two registers define part of the calling convention between
 3677   // compiled code and the interpreter.
 3678 
 3679   // Inline Cache Register or method for I2C.
 3680   inline_cache_reg(R19); // R19_method
 3681 
 3682   // Optional: name the operand used by cisc-spilling to access
 3683   // [stack_pointer + offset].
 3684   cisc_spilling_operand_name(indOffset);
 3685 
 3686   // Number of stack slots consumed by a Monitor enter.
 3687   sync_stack_slots((frame::jit_monitor_size / VMRegImpl::stack_slot_size));
 3688 
 3689   // Compiled code's Frame Pointer.
 3690   frame_pointer(R1); // R1_SP
 3691 
 3692   // Interpreter stores its frame pointer in a register which is
 3693   // stored to the stack by I2CAdaptors. I2CAdaptors convert from
 3694   // interpreted java to compiled java.
 3695   //
 3696   // R14_state holds pointer to caller's cInterpreter.
 3697   interpreter_frame_pointer(R14); // R14_state
 3698 
 3699   stack_alignment(frame::alignment_in_bytes);
 3700 
 3701   // Number of outgoing stack slots killed above the
 3702   // out_preserve_stack_slots for calls to C. Supports the var-args
 3703   // backing area for register parms.
 3704   //
 3705   varargs_C_out_slots_killed(((frame::native_abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
 3706 
 3707   // The after-PROLOG location of the return address. Location of
 3708   // return address specifies a type (REG or STACK) and a number
 3709   // representing the register number (i.e. - use a register name) or
 3710   // stack slot.
 3711   //
 3712   // A: Link register is stored in stack slot ...
 3713   // M:  ... but it's in the caller's frame according to PPC-64 ABI.
 3714   // J: Therefore, we make sure that the link register is also in R11_scratch1
 3715   //    at the end of the prolog.
 3716   // B: We use R20, now.
 3717   //return_addr(REG R20);
 3718 
 3719   // G: After reading the comments made by all the luminaries on their
 3720   //    failure to tell the compiler where the return address really is,
 3721   //    I hardly dare to try myself.  However, I'm convinced it's in slot
 3722   //    4, which apparently works and saves us some spills.
 3723   return_addr(STACK 4);
 3724 
 3725   // Location of native (C/C++) and interpreter return values. This
 3726   // is specified to be the same as Java. In the 32-bit VM, long
 3727   // values are actually returned from native calls in O0:O1 and
 3728   // returned to the interpreter in I0:I1. The copying to and from
 3729   // the register pairs is done by the appropriate call and epilog
 3730   // opcodes. This simplifies the register allocator.
 3731   c_return_value %{
 3732     assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
 3733             (ideal_reg == Op_RegN && CompressedOops::base() == nullptr && CompressedOops::shift() == 0),
 3734             "only return normal values");
 3735     // enum names from opcodes.hpp:    Op_Node Op_Set Op_RegN       Op_RegI       Op_RegP       Op_RegF       Op_RegD       Op_RegL
 3736     static int typeToRegLo[Op_RegL+1] = { 0,   0,     R3_num,   R3_num,   R3_num,   F1_num,   F1_num,   R3_num };
 3737     static int typeToRegHi[Op_RegL+1] = { 0,   0,     OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
 3738     return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
 3739   %}
 3740 
 3741   // Location of compiled Java return values.  Same as C
 3742   return_value %{
 3743     assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
 3744             (ideal_reg == Op_RegN && CompressedOops::base() == nullptr && CompressedOops::shift() == 0),
 3745             "only return normal values");
 3746     // enum names from opcodes.hpp:    Op_Node Op_Set Op_RegN       Op_RegI       Op_RegP       Op_RegF       Op_RegD       Op_RegL
 3747     static int typeToRegLo[Op_RegL+1] = { 0,   0,     R3_num,   R3_num,   R3_num,   F1_num,   F1_num,   R3_num };
 3748     static int typeToRegHi[Op_RegL+1] = { 0,   0,     OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
 3749     return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
 3750   %}
 3751 %}
 3752 
 3753 
 3754 //----------ATTRIBUTES---------------------------------------------------------
 3755 
 3756 //----------Operand Attributes-------------------------------------------------
 3757 op_attrib op_cost(1);          // Required cost attribute.
 3758 
 3759 //----------Instruction Attributes---------------------------------------------
 3760 
 3761 // Cost attribute (required).
 3762 ins_attrib ins_cost(DEFAULT_COST);
 3763 
 3764 // Is this instruction a non-matching short branch variant of some
 3765 // long branch? Not required.
 3766 ins_attrib ins_short_branch(0);
 3767 
 3768 ins_attrib ins_is_TrapBasedCheckNode(true);
 3769 
 3770 // Number of constants.
 3771 // This instruction uses the given number of constants
 3772 // (optional attribute).
 3773 // This is needed to determine in time whether the constant pool will
 3774 // exceed 4000 entries. Before postalloc_expand the overall number of constants
 3775 // is determined. It's also used to compute the constant pool size
 3776 // in Output().
 3777 ins_attrib ins_num_consts(0);
 3778 
 3779 // Required alignment attribute (must be a power of 2) specifies the
 3780 // alignment that some part of the instruction (not necessarily the
 3781 // start) requires. If > 1, a compute_padding() function must be
 3782 // provided for the instruction.
 3783 ins_attrib ins_alignment(1);
 3784 
 3785 // Enforce/prohibit rematerializations.
 3786 // - If an instruction is attributed with 'ins_cannot_rematerialize(true)'
 3787 //   then rematerialization of that instruction is prohibited and the
 3788 //   instruction's value will be spilled if necessary.
 3789 //   Causes MachNode::rematerialize() to return false.
 3790 // - If an instruction is attributed with 'ins_should_rematerialize(true)'
 3791 //   then rematerialization should be enforced and a copy of the instruction
 3792 //   should be inserted if possible; rematerialization is not guaranteed.
 3793 //   Note: this may result in rematerializations in front of every use.
 3794 //   Allows MachNode::rematerialize() to return true.
 3795 // (optional attribute)
 3796 ins_attrib ins_cannot_rematerialize(false);
 3797 ins_attrib ins_should_rematerialize(false);
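      // Usage sketch (hypothetical, not taken from a particular instruct below):
      // an instruct that must never be rematerialized would state
      //   ins_cannot_rematerialize(true);
      // in its attribute list.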
 3798 
 3799 // Instruction has variable size depending on alignment.
 3800 ins_attrib ins_variable_size_depending_on_alignment(false);
 3801 
 3802 // Instruction is a nop.
 3803 ins_attrib ins_is_nop(false);
 3804 
 3805 // Instruction is mapped to a MachIfFastLock node (instead of MachFastLock).
 3806 ins_attrib ins_use_mach_if_fast_lock_node(false);
 3807 
 3808 // Field for the toc offset of a constant.
 3809 //
 3810 // This is needed if the toc offset is not encodable as an immediate in
 3811 // the PPC load instruction. If so, the upper (hi) bits of the offset are
 3812 // added to the toc, and from this a load with immediate is performed.
 3813 // With postalloc expand, we get two nodes that require the same offset
 3814 // but which don't know about each other. The offset is only known
 3815 // when the constant is added to the constant pool during emitting.
 3816 // It is generated in the 'hi'-node adding the upper bits, and saved
 3817 // in this node.  The 'lo'-node has a link to the 'hi'-node and reads
 3818 // the offset from there when it gets encoded.
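      //
      // Schematic of the resulting two-node form (register names and exact
      // mnemonics are illustrative only):
      //   addis Rtmp, Rtoc, offset_hi    // 'hi'-node: add the upper 16 offset bits
      //   ld    Rdst, offset_lo(Rtmp)    // 'lo'-node: load using the low 16 bits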
 3819 ins_attrib ins_field_const_toc_offset(0);
 3820 ins_attrib ins_field_const_toc_offset_hi_node(0);
 3821 
 3822 // A field that can hold the instructions offset in the code buffer.
 3823 // Set in the nodes emitter.
 3824 ins_attrib ins_field_cbuf_insts_offset(-1);
 3825 
 3826 // Fields for referencing a call's load-IC-node.
 3827 // If the toc offset cannot be encoded as an immediate in a load, we
 3828 // use two nodes.
 3829 ins_attrib ins_field_load_ic_hi_node(0);
 3830 ins_attrib ins_field_load_ic_node(0);
 3831 
 3832 //----------OPERANDS-----------------------------------------------------------
 3833 // Operand definitions must precede instruction definitions for correct
 3834 // parsing in the ADLC because operands constitute user defined types
 3835 // which are used in instruction definitions.
 3836 //
 3837 // Formats are generated automatically for constants and base registers.
 3838 
 3839 operand vecX() %{
 3840   constraint(ALLOC_IN_RC(vs_reg));
 3841   match(VecX);
 3842 
 3843   format %{ %}
 3844   interface(REG_INTER);
 3845 %}
 3846 
 3847 //----------Simple Operands----------------------------------------------------
 3848 // Immediate Operands
 3849 
 3850 // Integer Immediate: 32-bit
 3851 operand immI() %{
 3852   match(ConI);
 3853   op_cost(40);
 3854   format %{ %}
 3855   interface(CONST_INTER);
 3856 %}
 3857 
 3858 operand immI8() %{
 3859   predicate(Assembler::is_simm(n->get_int(), 8));
 3860   op_cost(0);
 3861   match(ConI);
 3862   format %{ %}
 3863   interface(CONST_INTER);
 3864 %}
 3865 
 3866 // Integer Immediate: 16-bit
 3867 operand immI16() %{
 3868   predicate(Assembler::is_simm(n->get_int(), 16));
 3869   op_cost(0);
 3870   match(ConI);
 3871   format %{ %}
 3872   interface(CONST_INTER);
 3873 %}
 3874 
 3875 // Integer Immediate: 32-bit, where lowest 16 bits are 0x0000.
 3876 operand immIhi16() %{
 3877   predicate(((n->get_int() & 0xffff0000) != 0) && ((n->get_int() & 0xffff) == 0));
 3878   match(ConI);
 3879   op_cost(0);
 3880   format %{ %}
 3881   interface(CONST_INTER);
 3882 %}
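      // For example, 0x7fff0000 satisfies immIhi16 (upper half non-zero, low 16
      // bits zero), while 0x7fff0001 does not.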
 3883 
 3884 // Integer Immediate: 32-bit immediate for prefixed addi and load/store.
 3885 operand immI32() %{
 3886   predicate(PowerArchitecturePPC64 >= 10);
 3887   op_cost(0);
 3888   match(ConI);
 3889   format %{ %}
 3890   interface(CONST_INTER);
 3891 %}
 3892 
 3893 operand immInegpow2() %{
 3894   predicate(is_power_of_2(-(juint)(n->get_int())));
 3895   match(ConI);
 3896   op_cost(0);
 3897   format %{ %}
 3898   interface(CONST_INTER);
 3899 %}
 3900 
 3901 operand immIpow2minus1() %{
 3902   predicate(is_power_of_2((juint)(n->get_int()) + 1u));
 3903   match(ConI);
 3904   op_cost(0);
 3905   format %{ %}
 3906   interface(CONST_INTER);
 3907 %}
 3908 
 3909 operand immIpowerOf2() %{
 3910   predicate(is_power_of_2((juint)(n->get_int())));
 3911   match(ConI);
 3912   op_cost(0);
 3913   format %{ %}
 3914   interface(CONST_INTER);
 3915 %}
 3916 
 3917 // Unsigned Integer Immediate: the values 0-31
 3918 operand uimmI5() %{
 3919   predicate(Assembler::is_uimm(n->get_int(), 5));
 3920   match(ConI);
 3921   op_cost(0);
 3922   format %{ %}
 3923   interface(CONST_INTER);
 3924 %}
 3925 
 3926 // Unsigned Integer Immediate: 6-bit
 3927 operand uimmI6() %{
 3928   predicate(Assembler::is_uimm(n->get_int(), 6));
 3929   match(ConI);
 3930   op_cost(0);
 3931   format %{ %}
 3932   interface(CONST_INTER);
 3933 %}
 3934 
 3935 // Unsigned Integer Immediate: 6-bit int, greater than or equal to 32 (i.e. 32..63)
 3936 operand uimmI6_ge32() %{
 3937   predicate(Assembler::is_uimm(n->get_int(), 6) && n->get_int() >= 32);
 3938   match(ConI);
 3939   op_cost(0);
 3940   format %{ %}
 3941   interface(CONST_INTER);
 3942 %}
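      // For example, 32 and 63 match uimmI6_ge32, while 31 (too small) and 64
      // (needing 7 bits) do not.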
 3943 
 3944 // Unsigned Integer Immediate: 15-bit
 3945 operand uimmI15() %{
 3946   predicate(Assembler::is_uimm(n->get_int(), 15));
 3947   match(ConI);
 3948   op_cost(0);
 3949   format %{ %}
 3950   interface(CONST_INTER);
 3951 %}
 3952 
 3953 // Unsigned Integer Immediate: 16-bit
 3954 operand uimmI16() %{
 3955   predicate(Assembler::is_uimm(n->get_int(), 16));
 3956   match(ConI);
 3957   op_cost(0);
 3958   format %{ %}
 3959   interface(CONST_INTER);
 3960 %}
 3961 
 3962 // constant 'int 0'.
 3963 operand immI_0() %{
 3964   predicate(n->get_int() == 0);
 3965   match(ConI);
 3966   op_cost(0);
 3967   format %{ %}
 3968   interface(CONST_INTER);
 3969 %}
 3970 
 3971 // constant 'int 1'.
 3972 operand immI_1() %{
 3973   predicate(n->get_int() == 1);
 3974   match(ConI);
 3975   op_cost(0);
 3976   format %{ %}
 3977   interface(CONST_INTER);
 3978 %}
 3979 
 3980 // constant 'int -1'.
 3981 operand immI_minus1() %{
 3982   predicate(n->get_int() == -1);
 3983   match(ConI);
 3984   op_cost(0);
 3985   format %{ %}
 3986   interface(CONST_INTER);
 3987 %}
 3988 
 3989 // int value 16.
 3990 operand immI_16() %{
 3991   predicate(n->get_int() == 16);
 3992   match(ConI);
 3993   op_cost(0);
 3994   format %{ %}
 3995   interface(CONST_INTER);
 3996 %}
 3997 
 3998 // int value 24.
 3999 operand immI_24() %{
 4000   predicate(n->get_int() == 24);
 4001   match(ConI);
 4002   op_cost(0);
 4003   format %{ %}
 4004   interface(CONST_INTER);
 4005 %}
 4006 
 4007 // Compressed oops constants
 4008 // Pointer Immediate
 4009 operand immN() %{
 4010   match(ConN);
 4011 
 4012   op_cost(10);
 4013   format %{ %}
 4014   interface(CONST_INTER);
 4015 %}
 4016 
 4017 // nullptr Pointer Immediate
 4018 operand immN_0() %{
 4019   predicate(n->get_narrowcon() == 0);
 4020   match(ConN);
 4021 
 4022   op_cost(0);
 4023   format %{ %}
 4024   interface(CONST_INTER);
 4025 %}
 4026 
 4027 // Compressed klass constants
 4028 operand immNKlass() %{
 4029   match(ConNKlass);
 4030 
 4031   op_cost(0);
 4032   format %{ %}
 4033   interface(CONST_INTER);
 4034 %}
 4035 
 4036 // This operand can be used to avoid matching of an instruct
 4037 // with chain rule.
 4038 operand immNKlass_NM() %{
 4039   match(ConNKlass);
 4040   predicate(false);
 4041   op_cost(0);
 4042   format %{ %}
 4043   interface(CONST_INTER);
 4044 %}
 4045 
 4046 // Pointer Immediate: 64-bit
 4047 operand immP() %{
 4048   match(ConP);
 4049   op_cost(0);
 4050   format %{ %}
 4051   interface(CONST_INTER);
 4052 %}
 4053 
 4054 // Operand to avoid match of loadConP.
 4055 // This operand can be used to avoid matching of an instruct
 4056 // with chain rule.
 4057 operand immP_NM() %{
 4058   match(ConP);
 4059   predicate(false);
 4060   op_cost(0);
 4061   format %{ %}
 4062   interface(CONST_INTER);
 4063 %}
 4064 
 4065 // constant 'pointer 0'.
 4066 operand immP_0() %{
 4067   predicate(n->get_ptr() == 0);
 4068   match(ConP);
 4069   op_cost(0);
 4070   format %{ %}
 4071   interface(CONST_INTER);
 4072 %}
 4073 
 4074 // pointer 0x0 or 0x1
 4075 operand immP_0or1() %{
 4076   predicate((n->get_ptr() == 0) || (n->get_ptr() == 1));
 4077   match(ConP);
 4078   op_cost(0);
 4079   format %{ %}
 4080   interface(CONST_INTER);
 4081 %}
 4082 
 4083 operand immL() %{
 4084   match(ConL);
 4085   op_cost(40);
 4086   format %{ %}
 4087   interface(CONST_INTER);
 4088 %}
 4089 
 4090 operand immLmax30() %{
 4091   predicate((n->get_long() <= 30));
 4092   match(ConL);
 4093   op_cost(0);
 4094   format %{ %}
 4095   interface(CONST_INTER);
 4096 %}
 4097 
 4098 // Long Immediate: 16-bit
 4099 operand immL16() %{
 4100   predicate(Assembler::is_simm(n->get_long(), 16));
 4101   match(ConL);
 4102   op_cost(0);
 4103   format %{ %}
 4104   interface(CONST_INTER);
 4105 %}
 4106 
 4107 // Long Immediate: 16-bit, 4-aligned
 4108 operand immL16Alg4() %{
 4109   predicate(Assembler::is_simm(n->get_long(), 16) && ((n->get_long() & 0x3) == 0));
 4110   match(ConL);
 4111   op_cost(0);
 4112   format %{ %}
 4113   interface(CONST_INTER);
 4114 %}
 4115 
 4116 // Long Immediate: 32-bit, where lowest 16 bits are 0x0000.
 4117 operand immL32hi16() %{
 4118   predicate(Assembler::is_simm(n->get_long(), 32) && ((n->get_long() & 0xffffL) == 0L));
 4119   match(ConL);
 4120   op_cost(0);
 4121   format %{ %}
 4122   interface(CONST_INTER);
 4123 %}
 4124 
 4125 // Long Immediate: 32-bit
 4126 operand immL32() %{
 4127   predicate(Assembler::is_simm(n->get_long(), 32));
 4128   match(ConL);
 4129   op_cost(0);
 4130   format %{ %}
 4131   interface(CONST_INTER);
 4132 %}
 4133 
 4134 // Long Immediate: 34-bit, immediate field in prefixed addi and load/store.
 4135 operand immL34() %{
 4136   predicate(PowerArchitecturePPC64 >= 10 && Assembler::is_simm(n->get_long(), 34));
 4137   match(ConL);
 4138   op_cost(0);
 4139   format %{ %}
 4140   interface(CONST_INTER);
 4141 %}
 4142 
 4143 // Long Immediate: 64-bit, where the highest 16 bits are not 0x0000 and the low 48 bits are zero.
 4144 operand immLhighest16() %{
 4145   predicate((n->get_long() & 0xffff000000000000L) != 0L && (n->get_long() & 0x0000ffffffffffffL) == 0L);
 4146   match(ConL);
 4147   op_cost(0);
 4148   format %{ %}
 4149   interface(CONST_INTER);
 4150 %}
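      // For example, 0x1234000000000000L matches immLhighest16, while any value
      // with a non-zero bit in its low 48 bits does not.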
 4151 
 4152 operand immLnegpow2() %{
 4153   predicate(is_power_of_2(-(julong)(n->get_long())));
 4154   match(ConL);
 4155   op_cost(0);
 4156   format %{ %}
 4157   interface(CONST_INTER);
 4158 %}
 4159 
 4160 operand immLpow2minus1() %{
 4161   predicate(is_power_of_2((julong)(n->get_long()) + 1ull));
 4162   match(ConL);
 4163   op_cost(0);
 4164   format %{ %}
 4165   interface(CONST_INTER);
 4166 %}
 4167 
 4168 // constant 'long 0'.
 4169 operand immL_0() %{
 4170   predicate(n->get_long() == 0L);
 4171   match(ConL);
 4172   op_cost(0);
 4173   format %{ %}
 4174   interface(CONST_INTER);
 4175 %}
 4176 
 4177 // constant 'long -1'.
 4178 operand immL_minus1() %{
 4179   predicate(n->get_long() == -1L);
 4180   match(ConL);
 4181   op_cost(0);
 4182   format %{ %}
 4183   interface(CONST_INTER);
 4184 %}
 4185 
 4186 // Long Immediate: low 32-bit mask
 4187 operand immL_32bits() %{
 4188   predicate(n->get_long() == 0xFFFFFFFFL);
 4189   match(ConL);
 4190   op_cost(0);
 4191   format %{ %}
 4192   interface(CONST_INTER);
 4193 %}
 4194 
 4195 // Unsigned Long Immediate: 16-bit
 4196 operand uimmL16() %{
 4197   predicate(Assembler::is_uimm(n->get_long(), 16));
 4198   match(ConL);
 4199   op_cost(0);
 4200   format %{ %}
 4201   interface(CONST_INTER);
 4202 %}
 4203 
 4204 // Float Immediate
 4205 operand immF() %{
 4206   match(ConF);
 4207   op_cost(40);
 4208   format %{ %}
 4209   interface(CONST_INTER);
 4210 %}
 4211 
 4212 // Float Immediate: +0.0f.
 4213 operand immF_0() %{
 4214   predicate(jint_cast(n->getf()) == 0);
 4215   match(ConF);
 4216 
 4217   op_cost(0);
 4218   format %{ %}
 4219   interface(CONST_INTER);
 4220 %}
 4221 
 4222 // Double Immediate
 4223 operand immD() %{
 4224   match(ConD);
 4225   op_cost(40);
 4226   format %{ %}
 4227   interface(CONST_INTER);
 4228 %}
 4229 
 4230 // Double Immediate: +0.0d.
 4231 operand immD_0() %{
 4232   predicate(jlong_cast(n->getd()) == 0);
 4233   match(ConD);
 4234 
 4235   op_cost(0);
 4236   format %{ %}
 4237   interface(CONST_INTER);
 4238 %}
 4239 
 4240 // Integer Register Operands
 4241 // Integer Destination Register
 4242 // See definition of reg_class bits32_reg_rw.
 4243 operand iRegIdst() %{
 4244   constraint(ALLOC_IN_RC(bits32_reg_rw));
 4245   match(RegI);
 4246   match(rscratch1RegI);
 4247   match(rscratch2RegI);
 4248   match(rarg1RegI);
 4249   match(rarg2RegI);
 4250   match(rarg3RegI);
 4251   match(rarg4RegI);
 4252   format %{ %}
 4253   interface(REG_INTER);
 4254 %}
 4255 
 4256 // Integer Source Register
 4257 // See definition of reg_class bits32_reg_ro.
 4258 operand iRegIsrc() %{
 4259   constraint(ALLOC_IN_RC(bits32_reg_ro));
 4260   match(RegI);
 4261   match(rscratch1RegI);
 4262   match(rscratch2RegI);
 4263   match(rarg1RegI);
 4264   match(rarg2RegI);
 4265   match(rarg3RegI);
 4266   match(rarg4RegI);
 4267   format %{ %}
 4268   interface(REG_INTER);
 4269 %}
 4270 
 4271 operand rscratch1RegI() %{
 4272   constraint(ALLOC_IN_RC(rscratch1_bits32_reg));
 4273   match(iRegIdst);
 4274   format %{ %}
 4275   interface(REG_INTER);
 4276 %}
 4277 
 4278 operand rscratch2RegI() %{
 4279   constraint(ALLOC_IN_RC(rscratch2_bits32_reg));
 4280   match(iRegIdst);
 4281   format %{ %}
 4282   interface(REG_INTER);
 4283 %}
 4284 
 4285 operand rarg1RegI() %{
 4286   constraint(ALLOC_IN_RC(rarg1_bits32_reg));
 4287   match(iRegIdst);
 4288   format %{ %}
 4289   interface(REG_INTER);
 4290 %}
 4291 
 4292 operand rarg2RegI() %{
 4293   constraint(ALLOC_IN_RC(rarg2_bits32_reg));
 4294   match(iRegIdst);
 4295   format %{ %}
 4296   interface(REG_INTER);
 4297 %}
 4298 
 4299 operand rarg3RegI() %{
 4300   constraint(ALLOC_IN_RC(rarg3_bits32_reg));
 4301   match(iRegIdst);
 4302   format %{ %}
 4303   interface(REG_INTER);
 4304 %}
 4305 
 4306 operand rarg4RegI() %{
 4307   constraint(ALLOC_IN_RC(rarg4_bits32_reg));
 4308   match(iRegIdst);
 4309   format %{ %}
 4310   interface(REG_INTER);
 4311 %}
 4312 
 4313 operand rarg1RegL() %{
 4314   constraint(ALLOC_IN_RC(rarg1_bits64_reg));
 4315   match(iRegLdst);
 4316   format %{ %}
 4317   interface(REG_INTER);
 4318 %}
 4319 
 4320 // Pointer Destination Register
 4321 // See definition of reg_class bits64_reg_rw.
 4322 operand iRegPdst() %{
 4323   constraint(ALLOC_IN_RC(bits64_reg_rw));
 4324   match(RegP);
 4325   match(rscratch1RegP);
 4326   match(rscratch2RegP);
 4327   match(rarg1RegP);
 4328   match(rarg2RegP);
 4329   match(rarg3RegP);
 4330   match(rarg4RegP);
 4331   format %{ %}
 4332   interface(REG_INTER);
 4333 %}
 4334 
 4335 // Pointer Destination Register
 4336 // Operand not using r11 and r12 (killed in epilog).
 4337 operand iRegPdstNoScratch() %{
 4338   constraint(ALLOC_IN_RC(bits64_reg_leaf_call));
 4339   match(RegP);
 4340   match(rarg1RegP);
 4341   match(rarg2RegP);
 4342   match(rarg3RegP);
 4343   match(rarg4RegP);
 4344   format %{ %}
 4345   interface(REG_INTER);
 4346 %}
 4347 
 4348 // Pointer Source Register
 4349 // See definition of reg_class bits64_reg_ro.
 4350 operand iRegPsrc() %{
 4351   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4352   match(RegP);
 4353   match(iRegPdst);
 4354   match(rscratch1RegP);
 4355   match(rscratch2RegP);
 4356   match(rarg1RegP);
 4357   match(rarg2RegP);
 4358   match(rarg3RegP);
 4359   match(rarg4RegP);
 4360   match(rarg5RegP);
 4361   match(rarg6RegP);
 4362   match(threadRegP);
 4363   format %{ %}
 4364   interface(REG_INTER);
 4365 %}
 4366 
 4367 // Thread operand.
 4368 operand threadRegP() %{
 4369   constraint(ALLOC_IN_RC(thread_bits64_reg));
 4370   match(iRegPdst);
 4371   format %{ "R16" %}
 4372   interface(REG_INTER);
 4373 %}
 4374 
 4375 operand rscratch1RegP() %{
 4376   constraint(ALLOC_IN_RC(rscratch1_bits64_reg));
 4377   match(iRegPdst);
 4378   format %{ "R11" %}
 4379   interface(REG_INTER);
 4380 %}
 4381 
 4382 operand rscratch2RegP() %{
 4383   constraint(ALLOC_IN_RC(rscratch2_bits64_reg));
 4384   match(iRegPdst);
 4385   format %{ %}
 4386   interface(REG_INTER);
 4387 %}
 4388 
 4389 operand rarg1RegP() %{
 4390   constraint(ALLOC_IN_RC(rarg1_bits64_reg));
 4391   match(iRegPdst);
 4392   format %{ %}
 4393   interface(REG_INTER);
 4394 %}
 4395 
 4396 operand rarg2RegP() %{
 4397   constraint(ALLOC_IN_RC(rarg2_bits64_reg));
 4398   match(iRegPdst);
 4399   format %{ %}
 4400   interface(REG_INTER);
 4401 %}
 4402 
 4403 operand rarg3RegP() %{
 4404   constraint(ALLOC_IN_RC(rarg3_bits64_reg));
 4405   match(iRegPdst);
 4406   format %{ %}
 4407   interface(REG_INTER);
 4408 %}
 4409 
 4410 operand rarg4RegP() %{
 4411   constraint(ALLOC_IN_RC(rarg4_bits64_reg));
 4412   match(iRegPdst);
 4413   format %{ %}
 4414   interface(REG_INTER);
 4415 %}
 4416 
 4417 operand rarg5RegP() %{
 4418   constraint(ALLOC_IN_RC(rarg5_bits64_reg));
 4419   match(iRegPdst);
 4420   format %{ %}
 4421   interface(REG_INTER);
 4422 %}
 4423 
 4424 operand rarg6RegP() %{
 4425   constraint(ALLOC_IN_RC(rarg6_bits64_reg));
 4426   match(iRegPdst);
 4427   format %{ %}
 4428   interface(REG_INTER);
 4429 %}
 4430 
 4431 operand iRegNsrc() %{
 4432   constraint(ALLOC_IN_RC(bits32_reg_ro));
 4433   match(RegN);
 4434   match(iRegNdst);
 4435 
 4436   format %{ %}
 4437   interface(REG_INTER);
 4438 %}
 4439 
 4440 operand iRegNdst() %{
 4441   constraint(ALLOC_IN_RC(bits32_reg_rw));
 4442   match(RegN);
 4443 
 4444   format %{ %}
 4445   interface(REG_INTER);
 4446 %}
 4447 
 4448 // Long Destination Register
 4449 // See definition of reg_class bits64_reg_rw.
 4450 operand iRegLdst() %{
 4451   constraint(ALLOC_IN_RC(bits64_reg_rw));
 4452   match(RegL);
 4453   match(rscratch1RegL);
 4454   match(rscratch2RegL);
 4455   format %{ %}
 4456   interface(REG_INTER);
 4457 %}
 4458 
 4459 // Long Source Register
 4460 // See definition of reg_class bits64_reg_ro.
 4461 operand iRegLsrc() %{
 4462   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4463   match(RegL);
 4464   match(iRegLdst);
 4465   match(rscratch1RegL);
 4466   match(rscratch2RegL);
 4467   format %{ %}
 4468   interface(REG_INTER);
 4469 %}
 4470 
 4471 // Special operand for ConvL2I.
 4472 operand iRegL2Isrc(iRegLsrc reg) %{
 4473   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4474   match(ConvL2I reg);
 4475   format %{ "ConvL2I($reg)" %}
 4476   interface(REG_INTER)
 4477 %}
 4478 
 4479 operand rscratch1RegL() %{
 4480   constraint(ALLOC_IN_RC(rscratch1_bits64_reg));
 4481   match(RegL);
 4482   format %{ %}
 4483   interface(REG_INTER);
 4484 %}
 4485 
 4486 operand rscratch2RegL() %{
 4487   constraint(ALLOC_IN_RC(rscratch2_bits64_reg));
 4488   match(RegL);
 4489   format %{ %}
 4490   interface(REG_INTER);
 4491 %}
 4492 
 4493 // Condition Code Flag Registers
 4494 operand flagsReg() %{
 4495   constraint(ALLOC_IN_RC(int_flags));
 4496   match(RegFlags);
 4497   format %{ %}
 4498   interface(REG_INTER);
 4499 %}
 4500 
 4501 operand flagsRegSrc() %{
 4502   constraint(ALLOC_IN_RC(int_flags_ro));
 4503   match(RegFlags);
 4504   match(flagsReg);
 4505   match(flagsRegCR0);
 4506   format %{ %}
 4507   interface(REG_INTER);
 4508 %}
 4509 
 4510 // Condition Code Flag Register CR0
 4511 operand flagsRegCR0() %{
 4512   constraint(ALLOC_IN_RC(int_flags_CR0));
 4513   match(RegFlags);
 4514   format %{ "CR0" %}
 4515   interface(REG_INTER);
 4516 %}
 4517 
 4518 operand flagsRegCR1() %{
 4519   constraint(ALLOC_IN_RC(int_flags_CR1));
 4520   match(RegFlags);
 4521   format %{ "CR1" %}
 4522   interface(REG_INTER);
 4523 %}
 4524 
 4525 operand flagsRegCR6() %{
 4526   constraint(ALLOC_IN_RC(int_flags_CR6));
 4527   match(RegFlags);
 4528   format %{ "CR6" %}
 4529   interface(REG_INTER);
 4530 %}
 4531 
 4532 operand regCTR() %{
 4533   constraint(ALLOC_IN_RC(ctr_reg));
 4534   // RegFlags should work. Introducing a RegSpecial type would cause a
 4535   // lot of changes.
 4536   match(RegFlags);
 4537   format %{"SR_CTR" %}
 4538   interface(REG_INTER);
 4539 %}
 4540 
 4541 operand regD() %{
 4542   constraint(ALLOC_IN_RC(dbl_reg));
 4543   match(RegD);
 4544   format %{ %}
 4545   interface(REG_INTER);
 4546 %}
 4547 
 4548 operand regF() %{
 4549   constraint(ALLOC_IN_RC(flt_reg));
 4550   match(RegF);
 4551   format %{ %}
 4552   interface(REG_INTER);
 4553 %}
 4554 
 4555 // Special Registers
 4556 
 4557 // Method Register
 4558 operand inline_cache_regP(iRegPdst reg) %{
 4559   constraint(ALLOC_IN_RC(r19_bits64_reg)); // inline_cache_reg
 4560   match(reg);
 4561   format %{ %}
 4562   interface(REG_INTER);
 4563 %}
 4564 
 4565 // Operands to remove register moves in unscaled mode.
// Match read/write registers with an EncodeP node if neither shift nor add is required.
 4567 operand iRegP2N(iRegPsrc reg) %{
  predicate(false /* TODO: PPC port MatchDecodeNodes */ && CompressedOops::shift() == 0);
 4569   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4570   match(EncodeP reg);
 4571   format %{ "$reg" %}
 4572   interface(REG_INTER)
 4573 %}
 4574 
 4575 operand iRegN2P(iRegNsrc reg) %{
 4576   predicate(false /* TODO: PPC port MatchDecodeNodes*/);
 4577   constraint(ALLOC_IN_RC(bits32_reg_ro));
 4578   match(DecodeN reg);
 4579   format %{ "$reg" %}
 4580   interface(REG_INTER)
 4581 %}
 4582 
 4583 operand iRegN2P_klass(iRegNsrc reg) %{
 4584   predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
 4585   constraint(ALLOC_IN_RC(bits32_reg_ro));
 4586   match(DecodeNKlass reg);
 4587   format %{ "$reg" %}
 4588   interface(REG_INTER)
 4589 %}
 4590 
 4591 //----------Complex Operands---------------------------------------------------
 4592 // Indirect Memory Reference
 4593 operand indirect(iRegPsrc reg) %{
 4594   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4595   match(reg);
 4596   op_cost(100);
 4597   format %{ "[$reg]" %}
 4598   interface(MEMORY_INTER) %{
 4599     base($reg);
 4600     index(0x0);
 4601     scale(0x0);
 4602     disp(0x0);
 4603   %}
 4604 %}
 4605 
 4606 // Indirect with Offset
 4607 operand indOffset16(iRegPsrc reg, immL16 offset) %{
 4608   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4609   match(AddP reg offset);
 4610   op_cost(100);
 4611   format %{ "[$reg + $offset]" %}
 4612   interface(MEMORY_INTER) %{
 4613     base($reg);
 4614     index(0x0);
 4615     scale(0x0);
 4616     disp($offset);
 4617   %}
 4618 %}
 4619 
 4620 // Indirect with 4-aligned Offset
 4621 operand indOffset16Alg4(iRegPsrc reg, immL16Alg4 offset) %{
 4622   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4623   match(AddP reg offset);
 4624   op_cost(100);
 4625   format %{ "[$reg + $offset]" %}
 4626   interface(MEMORY_INTER) %{
 4627     base($reg);
 4628     index(0x0);
 4629     scale(0x0);
 4630     disp($offset);
 4631   %}
 4632 %}
 4633 
 4634 //----------Complex Operands for Compressed OOPs-------------------------------
 4635 // Compressed OOPs with narrow_oop_shift == 0.
 4636 
 4637 // Indirect Memory Reference, compressed OOP
 4638 operand indirectNarrow(iRegNsrc reg) %{
 4639   predicate(false /* TODO: PPC port MatchDecodeNodes*/);
 4640   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4641   match(DecodeN reg);
 4642   op_cost(100);
 4643   format %{ "[$reg]" %}
 4644   interface(MEMORY_INTER) %{
 4645     base($reg);
 4646     index(0x0);
 4647     scale(0x0);
 4648     disp(0x0);
 4649   %}
 4650 %}
 4651 
 4652 operand indirectNarrow_klass(iRegNsrc reg) %{
 4653   predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
 4654   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4655   match(DecodeNKlass reg);
 4656   op_cost(100);
 4657   format %{ "[$reg]" %}
 4658   interface(MEMORY_INTER) %{
 4659     base($reg);
 4660     index(0x0);
 4661     scale(0x0);
 4662     disp(0x0);
 4663   %}
 4664 %}
 4665 
 4666 // Indirect with Offset, compressed OOP
 4667 operand indOffset16Narrow(iRegNsrc reg, immL16 offset) %{
 4668   predicate(false /* TODO: PPC port MatchDecodeNodes*/);
 4669   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4670   match(AddP (DecodeN reg) offset);
 4671   op_cost(100);
 4672   format %{ "[$reg + $offset]" %}
 4673   interface(MEMORY_INTER) %{
 4674     base($reg);
 4675     index(0x0);
 4676     scale(0x0);
 4677     disp($offset);
 4678   %}
 4679 %}
 4680 
 4681 operand indOffset16Narrow_klass(iRegNsrc reg, immL16 offset) %{
 4682   predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
 4683   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4684   match(AddP (DecodeNKlass reg) offset);
 4685   op_cost(100);
 4686   format %{ "[$reg + $offset]" %}
 4687   interface(MEMORY_INTER) %{
 4688     base($reg);
 4689     index(0x0);
 4690     scale(0x0);
 4691     disp($offset);
 4692   %}
 4693 %}
 4694 
 4695 // Indirect with 4-aligned Offset, compressed OOP
 4696 operand indOffset16NarrowAlg4(iRegNsrc reg, immL16Alg4 offset) %{
 4697   predicate(false /* TODO: PPC port MatchDecodeNodes*/);
 4698   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4699   match(AddP (DecodeN reg) offset);
 4700   op_cost(100);
 4701   format %{ "[$reg + $offset]" %}
 4702   interface(MEMORY_INTER) %{
 4703     base($reg);
 4704     index(0x0);
 4705     scale(0x0);
 4706     disp($offset);
 4707   %}
 4708 %}
 4709 
 4710 operand indOffset16NarrowAlg4_klass(iRegNsrc reg, immL16Alg4 offset) %{
 4711   predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
 4712   constraint(ALLOC_IN_RC(bits64_reg_ro));
 4713   match(AddP (DecodeNKlass reg) offset);
 4714   op_cost(100);
 4715   format %{ "[$reg + $offset]" %}
 4716   interface(MEMORY_INTER) %{
 4717     base($reg);
 4718     index(0x0);
 4719     scale(0x0);
 4720     disp($offset);
 4721   %}
 4722 %}
 4723 
 4724 //----------Special Memory Operands--------------------------------------------
 4725 // Stack Slot Operand
 4726 //
 4727 // This operand is used for loading and storing temporary values on
 4728 // the stack where a match requires a value to flow through memory.
 4729 operand stackSlotI(sRegI reg) %{
 4730   constraint(ALLOC_IN_RC(stack_slots));
 4731   op_cost(100);
 4732   //match(RegI);
 4733   format %{ "[sp+$reg]" %}
 4734   interface(MEMORY_INTER) %{
 4735     base(0x1);   // R1_SP
 4736     index(0x0);
 4737     scale(0x0);
 4738     disp($reg);  // Stack Offset
 4739   %}
 4740 %}
 4741 
 4742 operand stackSlotL(sRegL reg) %{
 4743   constraint(ALLOC_IN_RC(stack_slots));
 4744   op_cost(100);
 4745   //match(RegL);
 4746   format %{ "[sp+$reg]" %}
 4747   interface(MEMORY_INTER) %{
 4748     base(0x1);   // R1_SP
 4749     index(0x0);
 4750     scale(0x0);
 4751     disp($reg);  // Stack Offset
 4752   %}
 4753 %}
 4754 
 4755 operand stackSlotP(sRegP reg) %{
 4756   constraint(ALLOC_IN_RC(stack_slots));
 4757   op_cost(100);
 4758   //match(RegP);
 4759   format %{ "[sp+$reg]" %}
 4760   interface(MEMORY_INTER) %{
 4761     base(0x1);   // R1_SP
 4762     index(0x0);
 4763     scale(0x0);
 4764     disp($reg);  // Stack Offset
 4765   %}
 4766 %}
 4767 
 4768 operand stackSlotF(sRegF reg) %{
 4769   constraint(ALLOC_IN_RC(stack_slots));
 4770   op_cost(100);
 4771   //match(RegF);
 4772   format %{ "[sp+$reg]" %}
 4773   interface(MEMORY_INTER) %{
 4774     base(0x1);   // R1_SP
 4775     index(0x0);
 4776     scale(0x0);
 4777     disp($reg);  // Stack Offset
 4778   %}
 4779 %}
 4780 
 4781 operand stackSlotD(sRegD reg) %{
 4782   constraint(ALLOC_IN_RC(stack_slots));
 4783   op_cost(100);
 4784   //match(RegD);
 4785   format %{ "[sp+$reg]" %}
 4786   interface(MEMORY_INTER) %{
 4787     base(0x1);   // R1_SP
 4788     index(0x0);
 4789     scale(0x0);
 4790     disp($reg);  // Stack Offset
 4791   %}
 4792 %}
 4793 
 4794 // Operands for expressing Control Flow
 4795 // NOTE: Label is a predefined operand which should not be redefined in
 4796 //       the AD file. It is generically handled within the ADLC.
 4797 
 4798 //----------Conditional Branch Operands----------------------------------------
 4799 // Comparison Op
 4800 //
 4801 // This is the operation of the comparison, and is limited to the
 4802 // following set of codes: L (<), LE (<=), G (>), GE (>=), E (==), NE
 4803 // (!=).
 4804 //
 4805 // Other attributes of the comparison, such as unsignedness, are specified
 4806 // by the comparison instruction that sets a condition code flags register.
 4807 // That result is represented by a flags operand whose subtype is appropriate
 4808 // to the unsignedness (etc.) of the comparison.
 4809 //
 4810 // Later, the instruction which matches both the Comparison Op (a Bool) and
 4811 // the flags (produced by the Cmp) specifies the coding of the comparison op
 4812 // by matching a specific subtype of Bool operand below.
 4813 
 4814 // When used for floating point comparisons: unordered same as less.
 4815 operand cmpOp() %{
 4816   match(Bool);
 4817   format %{ "" %}
 4818   interface(COND_INTER) %{
 4819                            // BO only encodes bit 4 of bcondCRbiIsX, as bits 1-3 are always '100'.
 4820                            //           BO          &  BI
 4821     equal(0xA);            // 10 10:   bcondCRbiIs1 & Condition::equal
 4822     not_equal(0x2);        // 00 10:   bcondCRbiIs0 & Condition::equal
 4823     less(0x8);             // 10 00:   bcondCRbiIs1 & Condition::less
 4824     greater_equal(0x0);    // 00 00:   bcondCRbiIs0 & Condition::less
 4825     less_equal(0x1);       // 00 01:   bcondCRbiIs0 & Condition::greater
 4826     greater(0x9);          // 10 01:   bcondCRbiIs1 & Condition::greater
 4827     overflow(0xB);         // 10 11:   bcondCRbiIs1 & Condition::summary_overflow
 4828     no_overflow(0x3);      // 00 11:   bcondCRbiIs0 & Condition::summary_overflow
 4829   %}
 4830 %}
 4831 
 4832 //----------OPERAND CLASSES----------------------------------------------------
 4833 // Operand Classes are groups of operands that are used to simplify
 4834 // instruction definitions by not requiring the AD writer to specify
 4835 // separate instructions for every form of operand when the
 4836 // instruction accepts multiple operand types with the same basic
 4837 // encoding and format. The classic case of this is memory operands.
 4838 // Indirect is not included since its use is limited to Compare & Swap.
 4839 
 4840 opclass memory(indirect, indOffset16 /*, indIndex, tlsReference*/, indirectNarrow, indirectNarrow_klass, indOffset16Narrow, indOffset16Narrow_klass);
 4841 // Memory operand where offsets are 4-aligned. Required for ld, std.
 4842 opclass memoryAlg4(indirect, indOffset16Alg4, indirectNarrow, indOffset16NarrowAlg4, indOffset16NarrowAlg4_klass);
 4843 opclass indirectMemory(indirect, indirectNarrow);
 4844 
 4845 // Special opclass for I and ConvL2I.
 4846 opclass iRegIsrc_iRegL2Isrc(iRegIsrc, iRegL2Isrc);
 4847 
 4848 // Operand classes to match encode and decode. iRegN_P2N is only used
 4849 // for storeN. I have never seen an encode node elsewhere.
 4850 opclass iRegN_P2N(iRegNsrc, iRegP2N);
 4851 opclass iRegP_N2P(iRegPsrc, iRegN2P, iRegN2P_klass);
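
// Example of what an opclass buys: a single rule such as loadI further down,
//   match(Set dst (LoadI mem));   // mem is of opclass 'memory'
// matches LoadI for every addressing mode grouped in 'memory', e.g.
//   lwz rD, 0(rBase)          ; indirect
//   lwz rD, disp16(rBase)     ; indOffset16
// so no separate instruct per addressing mode is required.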
 4852 
 4853 //----------PIPELINE-----------------------------------------------------------
 4854 
 4855 pipeline %{
 4856 
 4857 // See J.M.Tendler et al. "Power4 system microarchitecture", IBM
 4858 // J. Res. & Dev., No. 1, Jan. 2002.
 4859 
 4860 //----------ATTRIBUTES---------------------------------------------------------
 4861 attributes %{
 4862 
 4863   // Power4 instructions are of fixed length.
 4864   fixed_size_instructions;
 4865 
 4866   // TODO: if `bundle' means number of instructions fetched
 4867   // per cycle, this is 8. If `bundle' means Power4 `group', that is
 4868   // max instructions issued per cycle, this is 5.
 4869   max_instructions_per_bundle = 8;
 4870 
 4871   // A Power4 instruction is 4 bytes long.
 4872   instruction_unit_size = 4;
 4873 
 4874   // The Power4 processor fetches 64 bytes...
 4875   instruction_fetch_unit_size = 64;
 4876 
 4877   // ...in one line
 4878   instruction_fetch_units = 1
 4879 
 4880   // Unused, list one so that array generated by adlc is not empty.
 4881   // Aix compiler chokes if _nop_count = 0.
 4882   nops(fxNop);
 4883 %}
 4884 
 4885 //----------RESOURCES----------------------------------------------------------
 4886 // Resources are the functional units available to the machine
 4887 resources(
 4888    PPC_BR,         // branch unit
 4889    PPC_CR,         // condition unit
 4890    PPC_FX1,        // integer arithmetic unit 1
 4891    PPC_FX2,        // integer arithmetic unit 2
 4892    PPC_LDST1,      // load/store unit 1
 4893    PPC_LDST2,      // load/store unit 2
 4894    PPC_FP1,        // float arithmetic unit 1
 4895    PPC_FP2,        // float arithmetic unit 2
 4896    PPC_LDST = PPC_LDST1 | PPC_LDST2,
 4897    PPC_FX = PPC_FX1 | PPC_FX2,
 4898    PPC_FP = PPC_FP1 | PPC_FP2
 4899  );
 4900 
 4901 //----------PIPELINE DESCRIPTION-----------------------------------------------
 4902 // Pipeline Description specifies the stages in the machine's pipeline
 4903 pipe_desc(
 4904    // Power4 longest pipeline path
 4905    PPC_IF,   // instruction fetch
 4906    PPC_IC,
 4907    //PPC_BP, // branch prediction
 4908    PPC_D0,   // decode
 4909    PPC_D1,   // decode
 4910    PPC_D2,   // decode
 4911    PPC_D3,   // decode
 4912    PPC_Xfer1,
 4913    PPC_GD,   // group definition
 4914    PPC_MP,   // map
 4915    PPC_ISS,  // issue
 4916    PPC_RF,   // resource fetch
 4917    PPC_EX1,  // execute (all units)
 4918    PPC_EX2,  // execute (FP, LDST)
 4919    PPC_EX3,  // execute (FP, LDST)
 4920    PPC_EX4,  // execute (FP)
 4921    PPC_EX5,  // execute (FP)
 4922    PPC_EX6,  // execute (FP)
 4923    PPC_WB,   // write back
 4924    PPC_Xfer2,
 4925    PPC_CP
 4926  );
 4927 
 4928 //----------PIPELINE CLASSES---------------------------------------------------
 4929 // Pipeline Classes describe the stages in which input and output are
 4930 // referenced by the hardware pipeline.
 4931 
 4932 // Simple pipeline classes.
 4933 
 4934 // Default pipeline class.
 4935 pipe_class pipe_class_default() %{
 4936   single_instruction;
 4937   fixed_latency(2);
 4938 %}
 4939 
 4940 // Pipeline class for empty instructions.
 4941 pipe_class pipe_class_empty() %{
 4942   single_instruction;
 4943   fixed_latency(0);
 4944 %}
 4945 
 4946 // Pipeline class for compares.
 4947 pipe_class pipe_class_compare() %{
 4948   single_instruction;
 4949   fixed_latency(16);
 4950 %}
 4951 
 4952 // Pipeline class for traps.
 4953 pipe_class pipe_class_trap() %{
 4954   single_instruction;
 4955   fixed_latency(100);
 4956 %}
 4957 
 4958 // Pipeline class for memory operations.
 4959 pipe_class pipe_class_memory() %{
 4960   single_instruction;
 4961   fixed_latency(16);
 4962 %}
 4963 
 4964 // Pipeline class for call.
 4965 pipe_class pipe_class_call() %{
 4966   single_instruction;
 4967   fixed_latency(100);
 4968 %}
 4969 
 4970 // Define the class for the Nop node.
 4971 define %{
 4972    MachNop = pipe_class_default;
 4973 %}
 4974 
 4975 %}
 4976 
 4977 //----------INSTRUCTIONS-------------------------------------------------------
 4978 
 4979 // Naming of instructions:
 4980 //   opA_operB / opA_operB_operC:
 4981 //     Operation 'op' with one or two source operands 'oper'. Result
 4982 //     type is A, source operand types are B and C.
 4983 //     Iff A == B == C, B and C are left out.
 4984 //
 4985 // The instructions are ordered according to the following scheme:
 4986 //  - loads
 4987 //  - load constants
 4988 //  - prefetch
 4989 //  - store
 4990 //  - encode/decode
 4991 //  - membar
 4992 //  - conditional moves
 4993 //  - compare & swap
 4994 //  - arithmetic and logic operations
 4995 //    * int: Add, Sub, Mul, Div, Mod
 4996 //    * int: lShift, arShift, urShift, rot
 4997 //    * float: Add, Sub, Mul, Div
 4998 //    * and, or, xor ...
 4999 //  - register moves: float <-> int, reg <-> stack, repl
//  - cast (high level type cast: XtoP, castPP, castII, not_null, etc.)
//  - conv (low level type cast requiring bit changes: sign extend, etc.)
 5002 //  - compares, range & zero checks.
 5003 //  - branches
 5004 //  - complex operations, intrinsics, min, max, replicate
 5005 //  - lock
 5006 //  - Calls
 5007 //
 5008 // If there are similar instructions with different types they are sorted:
 5009 // int before float
 5010 // small before big
 5011 // signed before unsigned
 5012 // e.g., loadS before loadUS before loadI before loadF.
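//
// Example: lshiftL_regL_immI (used below by loadConLhighest16_Ex) is the
// 'lshift' operation producing a long (L) from a long register and an int
// immediate; loadUS2L loads an unsigned short (US) and widens it to a long (L).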
 5013 
 5014 
 5015 //----------Load/Store Instructions--------------------------------------------
 5016 
 5017 //----------Load Instructions--------------------------------------------------
 5018 
 5019 // Converts byte to int.
// As convB2I_reg, but without a match rule. The match rule of convB2I_reg
// reuses the 'amount' operand, but ADLC expects the operand specification
// and the operands in the match rule to be equivalent.
 5023 instruct convB2I_reg_2(iRegIdst dst, iRegIsrc src) %{
 5024   effect(DEF dst, USE src);
 5025   format %{ "EXTSB   $dst, $src \t// byte->int" %}
 5026   size(4);
 5027   ins_encode %{
 5028     __ extsb($dst$$Register, $src$$Register);
 5029   %}
 5030   ins_pipe(pipe_class_default);
 5031 %}
 5032 
 5033 instruct loadUB_indirect(iRegIdst dst, indirectMemory mem) %{
 5034   // match-rule, false predicate
 5035   match(Set dst (LoadB mem));
 5036   predicate(false);
 5037 
 5038   format %{ "LBZ     $dst, $mem" %}
 5039   size(4);
 5040   ins_encode( enc_lbz(dst, mem) );
 5041   ins_pipe(pipe_class_memory);
 5042 %}
 5043 
 5044 instruct loadUB_indirect_ac(iRegIdst dst, indirectMemory mem) %{
 5045   // match-rule, false predicate
 5046   match(Set dst (LoadB mem));
 5047   predicate(false);
 5048 
 5049   format %{ "LBZ     $dst, $mem\n\t"
 5050             "TWI     $dst\n\t"
 5051             "ISYNC" %}
 5052   size(12);
 5053   ins_encode( enc_lbz_ac(dst, mem) );
 5054   ins_pipe(pipe_class_memory);
 5055 %}
 5056 
 5057 // Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
 5058 instruct loadB_indirect_Ex(iRegIdst dst, indirectMemory mem) %{
 5059   match(Set dst (LoadB mem));
 5060   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5061   ins_cost(MEMORY_REF_COST + DEFAULT_COST);
 5062   expand %{
 5063     iRegIdst tmp;
 5064     loadUB_indirect(tmp, mem);
 5065     convB2I_reg_2(dst, tmp);
 5066   %}
 5067 %}
 5068 
 5069 instruct loadB_indirect_ac_Ex(iRegIdst dst, indirectMemory mem) %{
 5070   match(Set dst (LoadB mem));
 5071   ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);
 5072   expand %{
 5073     iRegIdst tmp;
 5074     loadUB_indirect_ac(tmp, mem);
 5075     convB2I_reg_2(dst, tmp);
 5076   %}
 5077 %}
 5078 
 5079 instruct loadUB_indOffset16(iRegIdst dst, indOffset16 mem) %{
 5080   // match-rule, false predicate
 5081   match(Set dst (LoadB mem));
 5082   predicate(false);
 5083 
 5084   format %{ "LBZ     $dst, $mem" %}
 5085   size(4);
 5086   ins_encode( enc_lbz(dst, mem) );
 5087   ins_pipe(pipe_class_memory);
 5088 %}
 5089 
 5090 instruct loadUB_indOffset16_ac(iRegIdst dst, indOffset16 mem) %{
 5091   // match-rule, false predicate
 5092   match(Set dst (LoadB mem));
 5093   predicate(false);
 5094 
 5095   format %{ "LBZ     $dst, $mem\n\t"
 5096             "TWI     $dst\n\t"
 5097             "ISYNC" %}
 5098   size(12);
 5099   ins_encode( enc_lbz_ac(dst, mem) );
 5100   ins_pipe(pipe_class_memory);
 5101 %}
 5102 
 5103 // Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
 5104 instruct loadB_indOffset16_Ex(iRegIdst dst, indOffset16 mem) %{
 5105   match(Set dst (LoadB mem));
 5106   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5107   ins_cost(MEMORY_REF_COST + DEFAULT_COST);
 5108 
 5109   expand %{
 5110     iRegIdst tmp;
 5111     loadUB_indOffset16(tmp, mem);
 5112     convB2I_reg_2(dst, tmp);
 5113   %}
 5114 %}
 5115 
 5116 instruct loadB_indOffset16_ac_Ex(iRegIdst dst, indOffset16 mem) %{
 5117   match(Set dst (LoadB mem));
 5118   ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);
 5119 
 5120   expand %{
 5121     iRegIdst tmp;
 5122     loadUB_indOffset16_ac(tmp, mem);
 5123     convB2I_reg_2(dst, tmp);
 5124   %}
 5125 %}
 5126 
 5127 // Load Unsigned Byte (8bit UNsigned) into an int reg.
 5128 instruct loadUB(iRegIdst dst, memory mem) %{
 5129   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5130   match(Set dst (LoadUB mem));
 5131   ins_cost(MEMORY_REF_COST);
 5132 
 5133   format %{ "LBZ     $dst, $mem \t// byte, zero-extend to int" %}
 5134   size(4);
 5135   ins_encode( enc_lbz(dst, mem) );
 5136   ins_pipe(pipe_class_memory);
 5137 %}
 5138 
// Load Unsigned Byte (8bit UNsigned) acquire.
 5140 instruct loadUB_ac(iRegIdst dst, memory mem) %{
 5141   match(Set dst (LoadUB mem));
 5142   ins_cost(3*MEMORY_REF_COST);
 5143 
 5144   format %{ "LBZ     $dst, $mem \t// byte, zero-extend to int, acquire\n\t"
 5145             "TWI     $dst\n\t"
 5146             "ISYNC" %}
 5147   size(12);
 5148   ins_encode( enc_lbz_ac(dst, mem) );
 5149   ins_pipe(pipe_class_memory);
 5150 %}
 5151 
 5152 // Load Unsigned Byte (8bit UNsigned) into a Long Register.
 5153 instruct loadUB2L(iRegLdst dst, memory mem) %{
 5154   match(Set dst (ConvI2L (LoadUB mem)));
 5155   predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
 5156   ins_cost(MEMORY_REF_COST);
 5157 
 5158   format %{ "LBZ     $dst, $mem \t// byte, zero-extend to long" %}
 5159   size(4);
 5160   ins_encode( enc_lbz(dst, mem) );
 5161   ins_pipe(pipe_class_memory);
 5162 %}
 5163 
 5164 instruct loadUB2L_ac(iRegLdst dst, memory mem) %{
 5165   match(Set dst (ConvI2L (LoadUB mem)));
 5166   ins_cost(3*MEMORY_REF_COST);
 5167 
 5168   format %{ "LBZ     $dst, $mem \t// byte, zero-extend to long, acquire\n\t"
 5169             "TWI     $dst\n\t"
 5170             "ISYNC" %}
 5171   size(12);
 5172   ins_encode( enc_lbz_ac(dst, mem) );
 5173   ins_pipe(pipe_class_memory);
 5174 %}
 5175 
 5176 // Load Short (16bit signed)
 5177 instruct loadS(iRegIdst dst, memory mem) %{
 5178   match(Set dst (LoadS mem));
 5179   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5180   ins_cost(MEMORY_REF_COST);
 5181 
 5182   format %{ "LHA     $dst, $mem" %}
 5183   size(4);
 5184   ins_encode %{
 5185     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5186     __ lha($dst$$Register, Idisp, $mem$$base$$Register);
 5187   %}
 5188   ins_pipe(pipe_class_memory);
 5189 %}
 5190 
 5191 // Load Short (16bit signed) acquire.
 5192 instruct loadS_ac(iRegIdst dst, memory mem) %{
 5193   match(Set dst (LoadS mem));
 5194   ins_cost(3*MEMORY_REF_COST);
 5195 
 5196   format %{ "LHA     $dst, $mem\t acquire\n\t"
 5197             "TWI     $dst\n\t"
 5198             "ISYNC" %}
 5199   size(12);
 5200   ins_encode %{
 5201     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5202     __ lha($dst$$Register, Idisp, $mem$$base$$Register);
 5203     __ twi_0($dst$$Register);
 5204     __ isync();
 5205   %}
 5206   ins_pipe(pipe_class_memory);
 5207 %}
 5208 
 5209 // Load Char (16bit unsigned)
 5210 instruct loadUS(iRegIdst dst, memory mem) %{
 5211   match(Set dst (LoadUS mem));
 5212   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5213   ins_cost(MEMORY_REF_COST);
 5214 
 5215   format %{ "LHZ     $dst, $mem" %}
 5216   size(4);
 5217   ins_encode( enc_lhz(dst, mem) );
 5218   ins_pipe(pipe_class_memory);
 5219 %}
 5220 
 5221 // Load Char (16bit unsigned) acquire.
 5222 instruct loadUS_ac(iRegIdst dst, memory mem) %{
 5223   match(Set dst (LoadUS mem));
 5224   ins_cost(3*MEMORY_REF_COST);
 5225 
 5226   format %{ "LHZ     $dst, $mem \t// acquire\n\t"
 5227             "TWI     $dst\n\t"
 5228             "ISYNC" %}
 5229   size(12);
 5230   ins_encode( enc_lhz_ac(dst, mem) );
 5231   ins_pipe(pipe_class_memory);
 5232 %}
 5233 
 5234 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register.
 5235 instruct loadUS2L(iRegLdst dst, memory mem) %{
 5236   match(Set dst (ConvI2L (LoadUS mem)));
 5237   predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
 5238   ins_cost(MEMORY_REF_COST);
 5239 
 5240   format %{ "LHZ     $dst, $mem \t// short, zero-extend to long" %}
 5241   size(4);
 5242   ins_encode( enc_lhz(dst, mem) );
 5243   ins_pipe(pipe_class_memory);
 5244 %}
 5245 
 5246 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register acquire.
 5247 instruct loadUS2L_ac(iRegLdst dst, memory mem) %{
 5248   match(Set dst (ConvI2L (LoadUS mem)));
 5249   ins_cost(3*MEMORY_REF_COST);
 5250 
 5251   format %{ "LHZ     $dst, $mem \t// short, zero-extend to long, acquire\n\t"
 5252             "TWI     $dst\n\t"
 5253             "ISYNC" %}
 5254   size(12);
 5255   ins_encode( enc_lhz_ac(dst, mem) );
 5256   ins_pipe(pipe_class_memory);
 5257 %}
 5258 
 5259 // Load Integer.
 5260 instruct loadI(iRegIdst dst, memory mem) %{
 5261   match(Set dst (LoadI mem));
 5262   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5263   ins_cost(MEMORY_REF_COST);
 5264 
 5265   format %{ "LWZ     $dst, $mem" %}
 5266   size(4);
 5267   ins_encode( enc_lwz(dst, mem) );
 5268   ins_pipe(pipe_class_memory);
 5269 %}
 5270 
 5271 // Load Integer acquire.
 5272 instruct loadI_ac(iRegIdst dst, memory mem) %{
 5273   match(Set dst (LoadI mem));
 5274   ins_cost(3*MEMORY_REF_COST);
 5275 
 5276   format %{ "LWZ     $dst, $mem \t// load acquire\n\t"
 5277             "TWI     $dst\n\t"
 5278             "ISYNC" %}
 5279   size(12);
 5280   ins_encode( enc_lwz_ac(dst, mem) );
 5281   ins_pipe(pipe_class_memory);
 5282 %}
 5283 
 5284 // Match loading integer and casting it to unsigned int in
 5285 // long register.
 5286 // LoadI + ConvI2L + AndL 0xffffffff.
 5287 instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
 5288   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 5289   predicate(_kids[0]->_kids[0]->_leaf->as_Load()->is_unordered());
 5290   ins_cost(MEMORY_REF_COST);
 5291 
 5292   format %{ "LWZ     $dst, $mem \t// zero-extend to long" %}
 5293   size(4);
 5294   ins_encode( enc_lwz(dst, mem) );
 5295   ins_pipe(pipe_class_memory);
 5296 %}
 5297 
 5298 // Match loading integer and casting it to long.
 5299 instruct loadI2L(iRegLdst dst, memoryAlg4 mem) %{
 5300   match(Set dst (ConvI2L (LoadI mem)));
 5301   predicate(_kids[0]->_leaf->as_Load()->is_unordered());
 5302   ins_cost(MEMORY_REF_COST);
 5303 
 5304   format %{ "LWA     $dst, $mem \t// loadI2L" %}
 5305   size(4);
 5306   ins_encode %{
 5307     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5308     __ lwa($dst$$Register, Idisp, $mem$$base$$Register);
 5309   %}
 5310   ins_pipe(pipe_class_memory);
 5311 %}
 5312 
 5313 // Match loading integer and casting it to long - acquire.
 5314 instruct loadI2L_ac(iRegLdst dst, memoryAlg4 mem) %{
 5315   match(Set dst (ConvI2L (LoadI mem)));
 5316   ins_cost(3*MEMORY_REF_COST);
 5317 
 5318   format %{ "LWA     $dst, $mem \t// loadI2L acquire"
 5319             "TWI     $dst\n\t"
 5320             "ISYNC" %}
 5321   size(12);
 5322   ins_encode %{
 5323     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5324     __ lwa($dst$$Register, Idisp, $mem$$base$$Register);
 5325     __ twi_0($dst$$Register);
 5326     __ isync();
 5327   %}
 5328   ins_pipe(pipe_class_memory);
 5329 %}
 5330 
 5331 // Load Long - aligned
 5332 instruct loadL(iRegLdst dst, memoryAlg4 mem) %{
 5333   match(Set dst (LoadL mem));
 5334   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5335   ins_cost(MEMORY_REF_COST);
 5336 
 5337   format %{ "LD      $dst, $mem \t// long" %}
 5338   size(4);
 5339   ins_encode( enc_ld(dst, mem) );
 5340   ins_pipe(pipe_class_memory);
 5341 %}
 5342 
 5343 // Load Long - aligned acquire.
 5344 instruct loadL_ac(iRegLdst dst, memoryAlg4 mem) %{
 5345   match(Set dst (LoadL mem));
 5346   ins_cost(3*MEMORY_REF_COST);
 5347 
 5348   format %{ "LD      $dst, $mem \t// long acquire\n\t"
 5349             "TWI     $dst\n\t"
 5350             "ISYNC" %}
 5351   size(12);
 5352   ins_encode( enc_ld_ac(dst, mem) );
 5353   ins_pipe(pipe_class_memory);
 5354 %}
 5355 
 5356 // Load Long - UNaligned
 5357 instruct loadL_unaligned(iRegLdst dst, memoryAlg4 mem) %{
 5358   match(Set dst (LoadL_unaligned mem));
 5359   // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
 5360   ins_cost(MEMORY_REF_COST);
 5361 
 5362   format %{ "LD      $dst, $mem \t// unaligned long" %}
 5363   size(4);
 5364   ins_encode( enc_ld(dst, mem) );
 5365   ins_pipe(pipe_class_memory);
 5366 %}
 5367 
 5368 // Load nodes for superwords
 5369 
// Load Aligned Packed Bytes (8-byte vector)
 5371 instruct loadV8(iRegLdst dst, memoryAlg4 mem) %{
 5372   predicate(n->as_LoadVector()->memory_size() == 8);
 5373   match(Set dst (LoadVector mem));
 5374   ins_cost(MEMORY_REF_COST);
 5375 
 5376   format %{ "LD      $dst, $mem \t// load 8-byte Vector" %}
 5377   size(4);
 5378   ins_encode( enc_ld(dst, mem) );
 5379   ins_pipe(pipe_class_memory);
 5380 %}
 5381 
// Load Aligned Packed Bytes (16-byte vector)
 5383 instruct loadV16(vecX dst, indirect mem) %{
 5384   predicate(n->as_LoadVector()->memory_size() == 16);
 5385   match(Set dst (LoadVector mem));
 5386   ins_cost(MEMORY_REF_COST);
 5387 
 5388   format %{ "LXVD2X      $dst, $mem \t// load 16-byte Vector" %}
 5389   size(4);
 5390   ins_encode %{
 5391     __ lxvd2x($dst$$VectorSRegister, $mem$$Register);
 5392   %}
 5393   ins_pipe(pipe_class_default);
 5394 %}
 5395 
 5396 // Load Range, range = array length (=jint)
 5397 instruct loadRange(iRegIdst dst, memory mem) %{
 5398   match(Set dst (LoadRange mem));
 5399   ins_cost(MEMORY_REF_COST);
 5400 
 5401   format %{ "LWZ     $dst, $mem \t// range" %}
 5402   size(4);
 5403   ins_encode( enc_lwz(dst, mem) );
 5404   ins_pipe(pipe_class_memory);
 5405 %}
 5406 
 5407 // Load Compressed Pointer
 5408 instruct loadN(iRegNdst dst, memory mem) %{
 5409   match(Set dst (LoadN mem));
 5410   predicate((n->as_Load()->is_unordered() || followed_by_acquire(n)) && n->as_Load()->barrier_data() == 0);
 5411   ins_cost(MEMORY_REF_COST);
 5412 
 5413   format %{ "LWZ     $dst, $mem \t// load compressed ptr" %}
 5414   size(4);
 5415   ins_encode( enc_lwz(dst, mem) );
 5416   ins_pipe(pipe_class_memory);
 5417 %}
 5418 
 5419 // Load Compressed Pointer acquire.
 5420 instruct loadN_ac(iRegNdst dst, memory mem) %{
 5421   match(Set dst (LoadN mem));
 5422   predicate(n->as_Load()->barrier_data() == 0);
 5423   ins_cost(3*MEMORY_REF_COST);
 5424 
 5425   format %{ "LWZ     $dst, $mem \t// load acquire compressed ptr\n\t"
 5426             "TWI     $dst\n\t"
 5427             "ISYNC" %}
 5428   size(12);
 5429   ins_encode( enc_lwz_ac(dst, mem) );
 5430   ins_pipe(pipe_class_memory);
 5431 %}
 5432 
 5433 // Load Compressed Pointer and decode it if narrow_oop_shift == 0.
 5434 instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
 5435   match(Set dst (DecodeN (LoadN mem)));
 5436   predicate(_kids[0]->_leaf->as_Load()->is_unordered() && CompressedOops::shift() == 0 && _kids[0]->_leaf->as_Load()->barrier_data() == 0);
 5437   ins_cost(MEMORY_REF_COST);
 5438 
 5439   format %{ "LWZ     $dst, $mem \t// DecodeN (unscaled)" %}
 5440   size(4);
 5441   ins_encode( enc_lwz(dst, mem) );
 5442   ins_pipe(pipe_class_memory);
 5443 %}
 5444 
 5445 instruct loadN2P_klass_unscaled(iRegPdst dst, memory mem) %{
 5446   match(Set dst (DecodeNKlass (LoadNKlass mem)));
 5447   predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0 &&
 5448             _kids[0]->_leaf->as_Load()->is_unordered());
 5449   ins_cost(MEMORY_REF_COST);
 5450 
 5451   format %{ "LWZ     $dst, $mem \t// DecodeN (unscaled)" %}
 5452   size(4);
 5453   ins_encode( enc_lwz(dst, mem) );
 5454   ins_pipe(pipe_class_memory);
 5455 %}
 5456 
 5457 // Load Pointer
 5458 instruct loadP(iRegPdst dst, memoryAlg4 mem) %{
 5459   match(Set dst (LoadP mem));
 5460   predicate((n->as_Load()->is_unordered() || followed_by_acquire(n)) && n->as_Load()->barrier_data() == 0);
 5461   ins_cost(MEMORY_REF_COST);
 5462 
 5463   format %{ "LD      $dst, $mem \t// ptr" %}
 5464   size(4);
 5465   ins_encode( enc_ld(dst, mem) );
 5466   ins_pipe(pipe_class_memory);
 5467 %}
 5468 
 5469 // Load Pointer acquire.
 5470 instruct loadP_ac(iRegPdst dst, memoryAlg4 mem) %{
 5471   match(Set dst (LoadP mem));
 5472   ins_cost(3*MEMORY_REF_COST);
 5473 
 5474   predicate(n->as_Load()->barrier_data() == 0);
 5475 
 5476   format %{ "LD      $dst, $mem \t// ptr acquire\n\t"
 5477             "TWI     $dst\n\t"
 5478             "ISYNC" %}
 5479   size(12);
 5480   ins_encode( enc_ld_ac(dst, mem) );
 5481   ins_pipe(pipe_class_memory);
 5482 %}
 5483 
 5484 // LoadP + CastP2L
 5485 instruct loadP2X(iRegLdst dst, memoryAlg4 mem) %{
 5486   match(Set dst (CastP2X (LoadP mem)));
 5487   predicate(_kids[0]->_leaf->as_Load()->is_unordered() && _kids[0]->_leaf->as_Load()->barrier_data() == 0);
 5488   ins_cost(MEMORY_REF_COST);
 5489 
 5490   format %{ "LD      $dst, $mem \t// ptr + p2x" %}
 5491   size(4);
 5492   ins_encode( enc_ld(dst, mem) );
 5493   ins_pipe(pipe_class_memory);
 5494 %}
 5495 
 5496 // Load compressed klass pointer.
 5497 instruct loadNKlass(iRegNdst dst, memory mem) %{
 5498   match(Set dst (LoadNKlass mem));
 5499   ins_cost(MEMORY_REF_COST);
 5500 
 5501   format %{ "LWZ     $dst, $mem \t// compressed klass ptr" %}
 5502   size(4);
 5503   ins_encode( enc_lwz(dst, mem) );
 5504   ins_pipe(pipe_class_memory);
 5505 %}
 5506 
 5507 // Load Klass Pointer
 5508 instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
 5509   match(Set dst (LoadKlass mem));
 5510   ins_cost(MEMORY_REF_COST);
 5511 
 5512   format %{ "LD      $dst, $mem \t// klass ptr" %}
 5513   size(4);
 5514   ins_encode( enc_ld(dst, mem) );
 5515   ins_pipe(pipe_class_memory);
 5516 %}
 5517 
 5518 // Load Float
 5519 instruct loadF(regF dst, memory mem) %{
 5520   match(Set dst (LoadF mem));
 5521   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5522   ins_cost(MEMORY_REF_COST);
 5523 
 5524   format %{ "LFS     $dst, $mem" %}
 5525   size(4);
 5526   ins_encode %{
 5527     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5528     __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
 5529   %}
 5530   ins_pipe(pipe_class_memory);
 5531 %}
 5532 
 5533 // Load Float acquire.
 5534 instruct loadF_ac(regF dst, memory mem, flagsRegCR0 cr0) %{
 5535   match(Set dst (LoadF mem));
 5536   effect(TEMP cr0);
 5537   ins_cost(3*MEMORY_REF_COST);
 5538 
 5539   format %{ "LFS     $dst, $mem \t// acquire\n\t"
 5540             "FCMPU   cr0, $dst, $dst\n\t"
 5541             "BNE     cr0, next\n"
 5542             "next:\n\t"
 5543             "ISYNC" %}
 5544   size(16);
 5545   ins_encode %{
 5546     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5547     Label next;
 5548     __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
 5549     __ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
 5550     __ bne(CCR0, next);
 5551     __ bind(next);
 5552     __ isync();
 5553   %}
 5554   ins_pipe(pipe_class_memory);
 5555 %}
 5556 
 5557 // Load Double - aligned
 5558 instruct loadD(regD dst, memory mem) %{
 5559   match(Set dst (LoadD mem));
 5560   predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
 5561   ins_cost(MEMORY_REF_COST);
 5562 
 5563   format %{ "LFD     $dst, $mem" %}
 5564   size(4);
 5565   ins_encode( enc_lfd(dst, mem) );
 5566   ins_pipe(pipe_class_memory);
 5567 %}
 5568 
 5569 // Load Double - aligned acquire.
 5570 instruct loadD_ac(regD dst, memory mem, flagsRegCR0 cr0) %{
 5571   match(Set dst (LoadD mem));
 5572   effect(TEMP cr0);
 5573   ins_cost(3*MEMORY_REF_COST);
 5574 
 5575   format %{ "LFD     $dst, $mem \t// acquire\n\t"
 5576             "FCMPU   cr0, $dst, $dst\n\t"
 5577             "BNE     cr0, next\n"
 5578             "next:\n\t"
 5579             "ISYNC" %}
 5580   size(16);
 5581   ins_encode %{
 5582     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 5583     Label next;
 5584     __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
 5585     __ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
 5586     __ bne(CCR0, next);
 5587     __ bind(next);
 5588     __ isync();
 5589   %}
 5590   ins_pipe(pipe_class_memory);
 5591 %}
 5592 
 5593 // Load Double - UNaligned
 5594 instruct loadD_unaligned(regD dst, memory mem) %{
 5595   match(Set dst (LoadD_unaligned mem));
 5596   // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
 5597   ins_cost(MEMORY_REF_COST);
 5598 
 5599   format %{ "LFD     $dst, $mem" %}
 5600   size(4);
 5601   ins_encode( enc_lfd(dst, mem) );
 5602   ins_pipe(pipe_class_memory);
 5603 %}
 5604 
 5605 //----------Constants--------------------------------------------------------
 5606 
 5607 // Load MachConstantTableBase: add hi offset to global toc.
 5608 // TODO: Handle hidden register r29 in bundler!
 5609 instruct loadToc_hi(iRegLdst dst) %{
 5610   effect(DEF dst);
 5611   ins_cost(DEFAULT_COST);
 5612 
 5613   format %{ "ADDIS   $dst, R29, DISP.hi \t// load TOC hi" %}
 5614   size(4);
 5615   ins_encode %{
 5616     __ calculate_address_from_global_toc_hi16only($dst$$Register, __ method_toc());
 5617   %}
 5618   ins_pipe(pipe_class_default);
 5619 %}
 5620 
 5621 // Load MachConstantTableBase: add lo offset to global toc.
 5622 instruct loadToc_lo(iRegLdst dst, iRegLdst src) %{
 5623   effect(DEF dst, USE src);
 5624   ins_cost(DEFAULT_COST);
 5625 
 5626   format %{ "ADDI    $dst, $src, DISP.lo \t// load TOC lo" %}
 5627   size(4);
 5628   ins_encode %{
 5629     __ calculate_address_from_global_toc_lo16only($dst$$Register, __ method_toc());
 5630   %}
 5631   ins_pipe(pipe_class_default);
 5632 %}
 5633 
 5634 // Load 16-bit integer constant 0xssss????
 5635 instruct loadConI16(iRegIdst dst, immI16 src) %{
 5636   match(Set dst src);
 5637 
 5638   format %{ "LI      $dst, $src" %}
 5639   size(4);
 5640   ins_encode %{
 5641     __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
 5642   %}
 5643   ins_pipe(pipe_class_default);
 5644 %}
 5645 
 5646 // Load integer constant 0x????0000
 5647 instruct loadConIhi16(iRegIdst dst, immIhi16 src) %{
 5648   match(Set dst src);
 5649   ins_cost(DEFAULT_COST);
 5650 
 5651   format %{ "LIS     $dst, $src.hi" %}
 5652   size(4);
 5653   ins_encode %{
    // Lis sign extends the 16-bit src, then shifts it 16 bits to the left.
 5655     __ lis($dst$$Register, (int)((short)(($src$$constant & 0xFFFF0000) >> 16)));
 5656   %}
 5657   ins_pipe(pipe_class_default);
 5658 %}
 5659 
// Part 2 of loading a 32-bit constant: hi16 is already in src1 (properly
// shifted and sign extended); this adds the low 16 bits.
 5662 instruct loadConI32_lo16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
 5663   // no match-rule, false predicate
 5664   effect(DEF dst, USE src1, USE src2);
 5665   predicate(false);
 5666 
 5667   format %{ "ORI     $dst, $src1.hi, $src2.lo" %}
 5668   size(4);
 5669   ins_encode %{
 5670     __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
 5671   %}
 5672   ins_pipe(pipe_class_default);
 5673 %}
 5674 
 5675 instruct loadConI32(iRegIdst dst, immI32 src) %{
 5676   match(Set dst src);
 5677   // This macro is valid only in Power 10 and up, but adding the following predicate here
 5678   // caused a build error, so we comment it out for now.
 5679   // predicate(PowerArchitecturePPC64 >= 10);
 5680   ins_cost(DEFAULT_COST+1);
 5681 
 5682   format %{ "PLI     $dst, $src" %}
 5683   size(8);
 5684   ins_encode %{
 5685     assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
 5686     __ pli($dst$$Register, $src$$constant);
 5687   %}
 5688   ins_pipe(pipe_class_default);
 5689   ins_alignment(2);
 5690 %}
 5691 
 5692 instruct loadConI_Ex(iRegIdst dst, immI src) %{
 5693   match(Set dst src);
 5694   ins_cost(DEFAULT_COST*2);
 5695 
 5696   expand %{
 5697     // Would like to use $src$$constant.
 5698     immI16 srcLo %{ _opnds[1]->constant() %}
 5699     // srcHi can be 0000 if srcLo sign-extends to a negative number.
 5700     immIhi16 srcHi %{ _opnds[1]->constant() %}
 5701     iRegIdst tmpI;
 5702     loadConIhi16(tmpI, srcHi);
 5703     loadConI32_lo16(dst, tmpI, srcLo);
 5704   %}
 5705 %}
 5706 
 5707 // No constant pool entries required.
 5708 instruct loadConL16(iRegLdst dst, immL16 src) %{
 5709   match(Set dst src);
 5710 
 5711   format %{ "LI      $dst, $src \t// long" %}
 5712   size(4);
 5713   ins_encode %{
 5714     __ li($dst$$Register, (int)((short) ($src$$constant & 0xFFFF)));
 5715   %}
 5716   ins_pipe(pipe_class_default);
 5717 %}
 5718 
 5719 // Load long constant 0xssssssss????0000
 5720 instruct loadConL32hi16(iRegLdst dst, immL32hi16 src) %{
 5721   match(Set dst src);
 5722   ins_cost(DEFAULT_COST);
 5723 
 5724   format %{ "LIS     $dst, $src.hi \t// long" %}
 5725   size(4);
 5726   ins_encode %{
 5727     __ lis($dst$$Register, (int)((short)(($src$$constant & 0xFFFF0000) >> 16)));
 5728   %}
 5729   ins_pipe(pipe_class_default);
 5730 %}
 5731 
// To load a 32-bit constant: merge the lower 16 bits into the already
// loaded high 16 bits.
 5734 instruct loadConL32_lo16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
 5735   // no match-rule, false predicate
 5736   effect(DEF dst, USE src1, USE src2);
 5737   predicate(false);
 5738 
 5739   format %{ "ORI     $dst, $src1, $src2.lo" %}
 5740   size(4);
 5741   ins_encode %{
 5742     __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
 5743   %}
 5744   ins_pipe(pipe_class_default);
 5745 %}
 5746 
 5747 // Load 32-bit long constant
 5748 instruct loadConL32_Ex(iRegLdst dst, immL32 src) %{
 5749   match(Set dst src);
 5750   ins_cost(DEFAULT_COST*2);
 5751 
 5752   expand %{
 5753     // Would like to use $src$$constant.
 5754     immL16     srcLo %{ _opnds[1]->constant() /*& 0x0000FFFFL */%}
 5755     // srcHi can be 0000 if srcLo sign-extends to a negative number.
 5756     immL32hi16 srcHi %{ _opnds[1]->constant() /*& 0xFFFF0000L */%}
 5757     iRegLdst tmpL;
 5758     loadConL32hi16(tmpL, srcHi);
 5759     loadConL32_lo16(dst, tmpL, srcLo);
 5760   %}
 5761 %}
 5762 
 5763 // Load 34-bit long constant using prefixed addi. No constant pool entries required.
 5764 instruct loadConL34(iRegLdst dst, immL34 src) %{
 5765   match(Set dst src);
 5766   // This macro is valid only in Power 10 and up, but adding the following predicate here
 5767   // caused a build error, so we comment it out for now.
 5768   // predicate(PowerArchitecturePPC64 >= 10);
 5769   ins_cost(DEFAULT_COST+1);
 5770 
 5771   format %{ "PLI     $dst, $src \t// long" %}
 5772   size(8);
 5773   ins_encode %{
 5774     assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
 5775     __ pli($dst$$Register, $src$$constant);
 5776   %}
 5777   ins_pipe(pipe_class_default);
 5778   ins_alignment(2);
 5779 %}
 5780 
 5781 // Load long constant 0x????000000000000.
 5782 instruct loadConLhighest16_Ex(iRegLdst dst, immLhighest16 src) %{
 5783   match(Set dst src);
 5784   ins_cost(DEFAULT_COST);
 5785 
 5786   expand %{
 5787     immL32hi16 srcHi %{ _opnds[1]->constant() >> 32 /*& 0xFFFF0000L */%}
 5788     immI shift32 %{ 32 %}
 5789     iRegLdst tmpL;
 5790     loadConL32hi16(tmpL, srcHi);
 5791     lshiftL_regL_immI(dst, tmpL, shift32);
 5792   %}
 5793 %}
 5794 
 5795 // Expand node for constant pool load: small offset.
 5796 instruct loadConL(iRegLdst dst, immL src, iRegLdst toc) %{
 5797   effect(DEF dst, USE src, USE toc);
 5798   ins_cost(MEMORY_REF_COST);
 5799 
 5800   ins_num_consts(1);
 5801   // Needed so that CallDynamicJavaDirect can compute the address of this
 5802   // instruction for relocation.
 5803   ins_field_cbuf_insts_offset(int);
 5804 
 5805   format %{ "LD      $dst, offset, $toc \t// load long $src from TOC" %}
 5806   size(4);
 5807   ins_encode( enc_load_long_constL(dst, src, toc) );
 5808   ins_pipe(pipe_class_memory);
 5809 %}
 5810 
 5811 // Expand node for constant pool load: large offset.
 5812 instruct loadConL_hi(iRegLdst dst, immL src, iRegLdst toc) %{
 5813   effect(DEF dst, USE src, USE toc);
 5814   predicate(false);
 5815 
 5816   ins_num_consts(1);
 5817   ins_field_const_toc_offset(int);
 5818   // Needed so that CallDynamicJavaDirect can compute the address of this
 5819   // instruction for relocation.
 5820   ins_field_cbuf_insts_offset(int);
 5821 
 5822   format %{ "ADDIS   $dst, $toc, offset \t// load long $src from TOC (hi)" %}
 5823   size(4);
 5824   ins_encode( enc_load_long_constL_hi(dst, toc, src) );
 5825   ins_pipe(pipe_class_default);
 5826 %}
 5827 
 5828 // Expand node for constant pool load: large offset.
 5829 // No constant pool entries required.
 5830 instruct loadConL_lo(iRegLdst dst, immL src, iRegLdst base) %{
 5831   effect(DEF dst, USE src, USE base);
 5832   predicate(false);
 5833 
 5834   ins_field_const_toc_offset_hi_node(loadConL_hiNode*);
 5835 
 5836   format %{ "LD      $dst, offset, $base \t// load long $src from TOC (lo)" %}
 5837   size(4);
 5838   ins_encode %{
 5839     int offset = ra_->C->output()->in_scratch_emit_size() ? 0 : _const_toc_offset_hi_node->_const_toc_offset;
 5840     __ ld($dst$$Register, MacroAssembler::largeoffset_si16_si16_lo(offset), $base$$Register);
 5841   %}
 5842   ins_pipe(pipe_class_memory);
 5843 %}
 5844 
// Load long constant from the constant table. Expand in case an
// offset > 16 bits is needed.
 5847 // Adlc adds toc node MachConstantTableBase.
 5848 instruct loadConL_Ex(iRegLdst dst, immL src) %{
 5849   match(Set dst src);
 5850   ins_cost(MEMORY_REF_COST);
 5851 
 5852   format %{ "LD      $dst, offset, $constanttablebase\t// load long $src from table, postalloc expanded" %}
  // We cannot inline the enc_class for the expand as that does not support constanttablebase.
 5854   postalloc_expand( postalloc_expand_load_long_constant(dst, src, constanttablebase) );
 5855 %}
 5856 
 5857 // Load nullptr as compressed oop.
 5858 instruct loadConN0(iRegNdst dst, immN_0 src) %{
 5859   match(Set dst src);
 5860   ins_cost(DEFAULT_COST);
 5861 
 5862   format %{ "LI      $dst, $src \t// compressed ptr" %}
 5863   size(4);
 5864   ins_encode %{
 5865     __ li($dst$$Register, 0);
 5866   %}
 5867   ins_pipe(pipe_class_default);
 5868 %}
 5869 
 5870 // Load hi part of compressed oop constant.
 5871 instruct loadConN_hi(iRegNdst dst, immN src) %{
 5872   effect(DEF dst, USE src);
 5873   ins_cost(DEFAULT_COST);
 5874 
 5875   format %{ "LIS     $dst, $src \t// narrow oop hi" %}
 5876   size(4);
 5877   ins_encode %{
 5878     __ lis($dst$$Register, 0); // Will get patched.
 5879   %}
 5880   ins_pipe(pipe_class_default);
 5881 %}
 5882 
 5883 // Add lo part of compressed oop constant to already loaded hi part.
 5884 instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
 5885   effect(DEF dst, USE src1, USE src2);
 5886   ins_cost(DEFAULT_COST);
 5887 
 5888   format %{ "ORI     $dst, $src1, $src2 \t// narrow oop lo" %}
 5889   size(4);
 5890   ins_encode %{
 5891     AddressLiteral addrlit = __ constant_oop_address((jobject)$src2$$constant);
 5892     __ relocate(addrlit.rspec(), /*compressed format*/ 1);
 5893     __ ori($dst$$Register, $src1$$Register, 0); // Will get patched.
 5894   %}
 5895   ins_pipe(pipe_class_default);
 5896 %}
 5897 
 5898 instruct rldicl(iRegLdst dst, iRegLsrc src, immI16 shift, immI16 mask_begin) %{
 5899   effect(DEF dst, USE src, USE shift, USE mask_begin);
 5900 
 5901   size(4);
 5902   ins_encode %{
 5903     __ rldicl($dst$$Register, $src$$Register, $shift$$constant, $mask_begin$$constant);
 5904   %}
 5905   ins_pipe(pipe_class_default);
 5906 %}
 5907 
// Needed to postalloc expand loadConN: ConN is loaded as ConI, which
// leaves sign-extension bits in the upper 32 bits.
 5910 // This clears these bits: dst = src & 0xFFFFFFFF.
 5911 // TODO: Eventually call this maskN_regN_FFFFFFFF.
 5912 instruct clearMs32b(iRegNdst dst, iRegNsrc src) %{
 5913   effect(DEF dst, USE src);
 5914   predicate(false);
 5915 
 5916   format %{ "MASK    $dst, $src, 0xFFFFFFFF" %} // mask
 5917   size(4);
 5918   ins_encode %{
 5919     __ clrldi($dst$$Register, $src$$Register, 0x20);
 5920   %}
 5921   ins_pipe(pipe_class_default);
 5922 %}
 5923 
 5924 // Optimize DecodeN for disjoint base.
 5925 // Load base of compressed oops into a register
 5926 instruct loadBase(iRegLdst dst) %{
 5927   effect(DEF dst);
 5928 
 5929   format %{ "LoadConst $dst, heapbase" %}
 5930   ins_encode %{
 5931     __ load_const_optimized($dst$$Register, CompressedOops::base(), R0);
 5932   %}
 5933   ins_pipe(pipe_class_default);
 5934 %}
 5935 
 5936 // Loading ConN must be postalloc expanded so that edges between
 5937 // the nodes are safe. They may not interfere with a safepoint.
 5938 // GL TODO: This needs three instructions: better put this into the constant pool.
 5939 instruct loadConN_Ex(iRegNdst dst, immN src) %{
 5940   match(Set dst src);
 5941   ins_cost(DEFAULT_COST*2);
 5942 
 5943   format %{ "LoadN   $dst, $src \t// postalloc expanded" %} // mask
 5944   postalloc_expand %{
 5945     MachNode *m1 = new loadConN_hiNode();
 5946     MachNode *m2 = new loadConN_loNode();
 5947     MachNode *m3 = new clearMs32bNode();
 5948     m1->add_req(nullptr);
 5949     m2->add_req(nullptr, m1);
 5950     m3->add_req(nullptr, m2);
 5951     m1->_opnds[0] = op_dst;
 5952     m1->_opnds[1] = op_src;
 5953     m2->_opnds[0] = op_dst;
 5954     m2->_opnds[1] = op_dst;
 5955     m2->_opnds[2] = op_src;
 5956     m3->_opnds[0] = op_dst;
 5957     m3->_opnds[1] = op_dst;
 5958     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 5959     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 5960     ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 5961     nodes->push(m1);
 5962     nodes->push(m2);
 5963     nodes->push(m3);
 5964   %}
 5965 %}
 5966 
 5967 // We have seen a safepoint between the hi and lo parts, and this node was handled
 5968 // as an oop. Therefore this needs a match rule so that build_oop_map knows this is
 5969 // not a narrow oop.
 5970 instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
 5971   match(Set dst src);
 5972   effect(DEF dst, USE src);
 5973   ins_cost(DEFAULT_COST);
 5974 
 5975   format %{ "LIS     $dst, $src \t// narrow klass hi" %}
 5976   size(4);
 5977   ins_encode %{
 5978     intptr_t Csrc = CompressedKlassPointers::encode((Klass *)$src$$constant);
 5979     __ lis($dst$$Register, (int)(short)((Csrc >> 16) & 0xffff));
 5980   %}
 5981   ins_pipe(pipe_class_default);
 5982 %}
 5983 
// Like loadConNKlass_hi, this must be recognized as a narrow klass, not an oop!
 5985 instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
 5986   match(Set dst src1);
 5987   effect(TEMP src2);
 5988   ins_cost(DEFAULT_COST);
 5989 
 5990   format %{ "MASK    $dst, $src2, 0xFFFFFFFF" %} // mask
 5991   size(4);
 5992   ins_encode %{
 5993     __ clrldi($dst$$Register, $src2$$Register, 0x20);
 5994   %}
 5995   ins_pipe(pipe_class_default);
 5996 %}
 5997 
 5998 // This needs a match rule so that build_oop_map knows this is
 5999 // not a narrow oop.
 6000 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
 6001   match(Set dst src1);
 6002   effect(TEMP src2);
 6003   ins_cost(DEFAULT_COST);
 6004 
 6005   format %{ "ORI     $dst, $src1, $src2 \t// narrow klass lo" %}
 6006   size(4);
 6007   ins_encode %{
 6008     // Notify OOP recorder (don't need the relocation)
 6009     AddressLiteral md = __ constant_metadata_address((Klass*)$src1$$constant);
 6010     intptr_t Csrc = CompressedKlassPointers::encode((Klass*)md.value());
 6011     __ ori($dst$$Register, $src2$$Register, Csrc & 0xffff);
 6012   %}
 6013   ins_pipe(pipe_class_default);
 6014 %}
 6015 
 6016 // Loading ConNKlass must be postalloc expanded so that edges between
 6017 // the nodes are safe. They may not interfere with a safepoint.
 6018 instruct loadConNKlass_Ex(iRegNdst dst, immNKlass src) %{
 6019   match(Set dst src);
 6020   ins_cost(DEFAULT_COST*2);
 6021 
 6022   format %{ "LoadN   $dst, $src \t// postalloc expanded" %} // mask
 6023   postalloc_expand %{
 6024     // Load high bits into register. Sign extended.
 6025     MachNode *m1 = new loadConNKlass_hiNode();
 6026     m1->add_req(nullptr);
 6027     m1->_opnds[0] = op_dst;
 6028     m1->_opnds[1] = op_src;
 6029     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6030     nodes->push(m1);
 6031 
 6032     MachNode *m2 = m1;
 6033     if (!Assembler::is_uimm((jlong)CompressedKlassPointers::encode((Klass *)op_src->constant()), 31)) {
 6034       // Value might be 1-extended. Mask out these bits.
 6035       m2 = new loadConNKlass_maskNode();
 6036       m2->add_req(nullptr, m1);
 6037       m2->_opnds[0] = op_dst;
 6038       m2->_opnds[1] = op_src;
 6039       m2->_opnds[2] = op_dst;
 6040       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6041       nodes->push(m2);
 6042     }
 6043 
 6044     MachNode *m3 = new loadConNKlass_loNode();
 6045     m3->add_req(nullptr, m2);
 6046     m3->_opnds[0] = op_dst;
 6047     m3->_opnds[1] = op_src;
 6048     m3->_opnds[2] = op_dst;
 6049     ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6050     nodes->push(m3);
 6051   %}
 6052 %}
 6053 
 6054 // 0x1 is used in object initialization (initial object header).
 6055 // No constant pool entries required.
 6056 instruct loadConP0or1(iRegPdst dst, immP_0or1 src) %{
 6057   match(Set dst src);
 6058 
 6059   format %{ "LI      $dst, $src \t// ptr" %}
 6060   size(4);
 6061   ins_encode %{
 6062     __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
 6063   %}
 6064   ins_pipe(pipe_class_default);
 6065 %}
 6066 
 6067 // Expand node for constant pool load: small offset.
 6068 // The match rule is needed to generate the correct bottom_type();
 6069 // however, this node should never match. Using a predicate is not
 6070 // possible since ADLC forbids predicates for chain rules, and the
 6071 // higher cost does not prevent matching in this case. For that reason
 6072 // the operand immP_NM with predicate(false) is used.
 6073 instruct loadConP(iRegPdst dst, immP_NM src, iRegLdst toc) %{
 6074   match(Set dst src);
 6075   effect(TEMP toc);
 6076 
 6077   ins_num_consts(1);
 6078 
 6079   format %{ "LD      $dst, offset, $toc \t// load ptr $src from TOC" %}
 6080   size(4);
 6081   ins_encode( enc_load_long_constP(dst, src, toc) );
 6082   ins_pipe(pipe_class_memory);
 6083 %}
 6084 
 6085 // Expand node for constant pool load: large offset.
 6086 instruct loadConP_hi(iRegPdst dst, immP_NM src, iRegLdst toc) %{
 6087   effect(DEF dst, USE src, USE toc);
 6088   predicate(false);
 6089 
 6090   ins_num_consts(1);
 6091   ins_field_const_toc_offset(int);
 6092 
 6093   format %{ "ADDIS   $dst, $toc, offset \t// load ptr $src from TOC (hi)" %}
 6094   size(4);
 6095   ins_encode( enc_load_long_constP_hi(dst, src, toc) );
 6096   ins_pipe(pipe_class_default);
 6097 %}
 6098 
 6099 // Expand node for constant pool load: large offset.
 6100 instruct loadConP_lo(iRegPdst dst, immP_NM src, iRegLdst base) %{
 6101   match(Set dst src);
 6102   effect(TEMP base);
 6103 
 6104   ins_field_const_toc_offset_hi_node(loadConP_hiNode*);
 6105 
 6106   format %{ "LD      $dst, offset, $base \t// load ptr $src from TOC (lo)" %}
 6107   size(4);
 6108   ins_encode %{
 6109     int offset = ra_->C->output()->in_scratch_emit_size() ? 0 : _const_toc_offset_hi_node->_const_toc_offset;
 6110     __ ld($dst$$Register, MacroAssembler::largeoffset_si16_si16_lo(offset), $base$$Register);
 6111   %}
 6112   ins_pipe(pipe_class_memory);
 6113 %}
 6114 
 6115 // Load pointer constant from constant table. Expand in case an
 6116 // offset > 16 bits is needed.
 6117 // Adlc adds toc node MachConstantTableBase.
 6118 instruct loadConP_Ex(iRegPdst dst, immP src) %{
 6119   match(Set dst src);
 6120   ins_cost(MEMORY_REF_COST);
 6121 
 6122   // This rule does not use "expand" because then
 6123   // the result type is not known to be an Oop.  An ADLC
 6124   // enhancement will be needed to make that work - not worth it!
 6125 
 6126   // If this instruction rematerializes, it prolongs the live range
 6127   // of the toc node, causing illegal graphs.
 6128   // assert(edge_from_to(_reg_node[reg_lo],def)) fails in verify_good_schedule().
 6129   ins_cannot_rematerialize(true);
 6130 
 6131   format %{ "LD    $dst, offset, $constanttablebase \t//  load ptr $src from table, postalloc expanded" %}
 6132   postalloc_expand( postalloc_expand_load_ptr_constant(dst, src, constanttablebase) );
 6133 %}
 6134 
 6135 // Expand node for constant pool load: small offset.
 6136 instruct loadConF(regF dst, immF src, iRegLdst toc) %{
 6137   effect(DEF dst, USE src, USE toc);
 6138   ins_cost(MEMORY_REF_COST);
 6139 
 6140   ins_num_consts(1);
 6141 
 6142   format %{ "LFS     $dst, offset, $toc \t// load float $src from TOC" %}
 6143   size(4);
 6144   ins_encode %{
 6145     address float_address = __ float_constant($src$$constant);
 6146     if (float_address == nullptr) {
 6147       ciEnv::current()->record_out_of_memory_failure();
 6148       return;
 6149     }
 6150     __ lfs($dst$$FloatRegister, __ offset_to_method_toc(float_address), $toc$$Register);
 6151   %}
 6152   ins_pipe(pipe_class_memory);
 6153 %}
 6154 
 6155 // Expand node for constant pool load: large offset.
 6156 instruct loadConFComp(regF dst, immF src, iRegLdst toc) %{
 6157   effect(DEF dst, USE src, USE toc);
 6158   ins_cost(MEMORY_REF_COST);
 6159 
 6160   ins_num_consts(1);
 6161 
 6162   format %{ "ADDIS   $toc, $toc, offset_hi\n\t"
 6163             "LFS     $dst, offset_lo, $toc \t// load float $src from TOC (hi/lo)\n\t"
 6164             "ADDIS   $toc, $toc, -offset_hi"%}
 6165   size(12);
 6166   ins_encode %{
 6167     FloatRegister Rdst    = $dst$$FloatRegister;
 6168     Register Rtoc         = $toc$$Register;
 6169     address float_address = __ float_constant($src$$constant);
 6170     if (float_address == nullptr) {
 6171       ciEnv::current()->record_out_of_memory_failure();
 6172       return;
 6173     }
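          // Split the 32-bit TOC offset into a high part for ADDIS (which adds
          // offset_hi << 16) and a signed 16-bit low part for the load. Rounding with
          // +0x8000 keeps lo within [-0x8000, 0x7fff]; e.g. offset 0x1234A678 gives
          // hi = 0x1235 and lo = -0x5988.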
 6174     int offset            = __ offset_to_method_toc(float_address);
 6175     int hi = (offset + (1<<15))>>16;
 6176     int lo = offset - hi * (1<<16);
 6177 
 6178     __ addis(Rtoc, Rtoc, hi);
 6179     __ lfs(Rdst, lo, Rtoc);
 6180     __ addis(Rtoc, Rtoc, -hi);
 6181   %}
 6182   ins_pipe(pipe_class_memory);
 6183 %}
 6184 
 6185 // Adlc adds toc node MachConstantTableBase.
 6186 instruct loadConF_Ex(regF dst, immF src) %{
 6187   match(Set dst src);
 6188   ins_cost(MEMORY_REF_COST);
 6189 
 6190   // See loadConP.
 6191   ins_cannot_rematerialize(true);
 6192 
 6193   format %{ "LFS     $dst, offset, $constanttablebase \t// load $src from table, postalloc expanded" %}
 6194   postalloc_expand( postalloc_expand_load_float_constant(dst, src, constanttablebase) );
 6195 %}
 6196 
 6197 // Expand node for constant pool load: small offset.
 6198 instruct loadConD(regD dst, immD src, iRegLdst toc) %{
 6199   effect(DEF dst, USE src, USE toc);
 6200   ins_cost(MEMORY_REF_COST);
 6201 
 6202   ins_num_consts(1);
 6203 
 6204   format %{ "LFD     $dst, offset, $toc \t// load double $src from TOC" %}
 6205   size(4);
 6206   ins_encode %{
 6207     address float_address = __ double_constant($src$$constant);
 6208     if (float_address == nullptr) {
 6209       ciEnv::current()->record_out_of_memory_failure();
 6210       return;
 6211     }
 6212     int offset =  __ offset_to_method_toc(float_address);
 6213     __ lfd($dst$$FloatRegister, offset, $toc$$Register);
 6214   %}
 6215   ins_pipe(pipe_class_memory);
 6216 %}
 6217 
 6218 // Expand node for constant pool load: large offset.
 6219 instruct loadConDComp(regD dst, immD src, iRegLdst toc) %{
 6220   effect(DEF dst, USE src, USE toc);
 6221   ins_cost(MEMORY_REF_COST);
 6222 
 6223   ins_num_consts(1);
 6224 
 6225   format %{ "ADDIS   $toc, $toc, offset_hi\n\t"
 6226             "LFD     $dst, offset_lo, $toc \t// load double $src from TOC (hi/lo)\n\t"
 6227             "ADDIS   $toc, $toc, -offset_hi" %}
 6228   size(12);
 6229   ins_encode %{
 6230     FloatRegister Rdst    = $dst$$FloatRegister;
 6231     Register      Rtoc    = $toc$$Register;
 6232     address float_address = __ double_constant($src$$constant);
 6233     if (float_address == nullptr) {
 6234       ciEnv::current()->record_out_of_memory_failure();
 6235       return;
 6236     }
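          // Split the TOC offset into hi/lo exactly as in loadConFComp above.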
 6237     int offset = __ offset_to_method_toc(float_address);
 6238     int hi = (offset + (1<<15))>>16;
 6239     int lo = offset - hi * (1<<16);
 6240 
 6241     __ addis(Rtoc, Rtoc, hi);
 6242     __ lfd(Rdst, lo, Rtoc);
 6243     __ addis(Rtoc, Rtoc, -hi);
 6244   %}
 6245   ins_pipe(pipe_class_memory);
 6246 %}
 6247 
 6248 // Adlc adds toc node MachConstantTableBase.
 6249 instruct loadConD_Ex(regD dst, immD src) %{
 6250   match(Set dst src);
 6251   ins_cost(MEMORY_REF_COST);
 6252 
 6253   // See loadConP.
 6254   ins_cannot_rematerialize(true);
 6255 
 6256   format %{ "ConD    $dst, offset, $constanttablebase \t// load $src from table, postalloc expanded" %}
 6257   postalloc_expand( postalloc_expand_load_double_constant(dst, src, constanttablebase) );
 6258 %}
 6259 
 6260 // Prefetch instructions.
 6261 // Must be safe to execute with invalid address (cannot fault).
 6262 
 6263 // Special prefetch versions which use the dcbz instruction.
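      // dcbz ("Data Cache Block set to Zero") establishes the whole cache block and
      // sets it to zero without fetching it from memory, which pays off when the
      // block is about to be overwritten anyway (AllocatePrefetchStyle == 3).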
 6264 instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
 6265   match(PrefetchAllocation (AddP mem src));
 6266   predicate(AllocatePrefetchStyle == 3);
 6267   ins_cost(MEMORY_REF_COST);
 6268 
 6269   format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
 6270   size(4);
 6271   ins_encode %{
 6272     __ dcbz($src$$Register, $mem$$base$$Register);
 6273   %}
 6274   ins_pipe(pipe_class_memory);
 6275 %}
 6276 
 6277 instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
 6278   match(PrefetchAllocation mem);
 6279   predicate(AllocatePrefetchStyle == 3);
 6280   ins_cost(MEMORY_REF_COST);
 6281 
 6282   format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
 6283   size(4);
 6284   ins_encode %{
 6285     __ dcbz($mem$$base$$Register);
 6286   %}
 6287   ins_pipe(pipe_class_memory);
 6288 %}
 6289 
 6290 instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
 6291   match(PrefetchAllocation (AddP mem src));
 6292   predicate(AllocatePrefetchStyle != 3);
 6293   ins_cost(MEMORY_REF_COST);
 6294 
 6295   format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
 6296   size(4);
 6297   ins_encode %{
 6298     __ dcbtst($src$$Register, $mem$$base$$Register);
 6299   %}
 6300   ins_pipe(pipe_class_memory);
 6301 %}
 6302 
 6303 instruct prefetch_alloc_no_offset(indirectMemory mem) %{
 6304   match(PrefetchAllocation mem);
 6305   predicate(AllocatePrefetchStyle != 3);
 6306   ins_cost(MEMORY_REF_COST);
 6307 
 6308   format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
 6309   size(4);
 6310   ins_encode %{
 6311     __ dcbtst($mem$$base$$Register);
 6312   %}
 6313   ins_pipe(pipe_class_memory);
 6314 %}
 6315 
 6316 //----------Store Instructions-------------------------------------------------
 6317 
 6318 // Store Byte
 6319 instruct storeB(memory mem, iRegIsrc src) %{
 6320   match(Set mem (StoreB mem src));
 6321   ins_cost(MEMORY_REF_COST);
 6322 
 6323   format %{ "STB     $src, $mem \t// byte" %}
 6324   size(4);
 6325   ins_encode %{
 6326     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 6327     __ stb($src$$Register, Idisp, $mem$$base$$Register);
 6328   %}
 6329   ins_pipe(pipe_class_memory);
 6330 %}
 6331 
 6332 // Store Char/Short
 6333 instruct storeC(memory mem, iRegIsrc src) %{
 6334   match(Set mem (StoreC mem src));
 6335   ins_cost(MEMORY_REF_COST);
 6336 
 6337   format %{ "STH     $src, $mem \t// short" %}
 6338   size(4);
 6339   ins_encode %{
 6340     int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
 6341     __ sth($src$$Register, Idisp, $mem$$base$$Register);
 6342   %}
 6343   ins_pipe(pipe_class_memory);
 6344 %}
 6345 
 6346 // Store Integer
 6347 instruct storeI(memory mem, iRegIsrc src) %{
 6348   match(Set mem (StoreI mem src));
 6349   ins_cost(MEMORY_REF_COST);
 6350 
 6351   format %{ "STW     $src, $mem" %}
 6352   size(4);
 6353   ins_encode( enc_stw(src, mem) );
 6354   ins_pipe(pipe_class_memory);
 6355 %}
 6356 
 6357 // ConvL2I + StoreI.
 6358 instruct storeI_convL2I(memory mem, iRegLsrc src) %{
 6359   match(Set mem (StoreI mem (ConvL2I src)));
 6360   ins_cost(MEMORY_REF_COST);
 6361 
 6362   format %{ "STW     l2i($src), $mem" %}
 6363   size(4);
 6364   ins_encode( enc_stw(src, mem) );
 6365   ins_pipe(pipe_class_memory);
 6366 %}
 6367 
 6368 // Store Long
 6369 instruct storeL(memoryAlg4 mem, iRegLsrc src) %{
 6370   match(Set mem (StoreL mem src));
 6371   ins_cost(MEMORY_REF_COST);
 6372 
 6373   format %{ "STD     $src, $mem \t// long" %}
 6374   size(4);
 6375   ins_encode( enc_std(src, mem) );
 6376   ins_pipe(pipe_class_memory);
 6377 %}
 6378 
 6379 // Store super word nodes.
 6380 
 6381 // Store Aligned Packed Byte long register to memory
 6382 instruct storeA8B(memoryAlg4 mem, iRegLsrc src) %{
 6383   predicate(n->as_StoreVector()->memory_size() == 8);
 6384   match(Set mem (StoreVector mem src));
 6385   ins_cost(MEMORY_REF_COST);
 6386 
 6387   format %{ "STD     $mem, $src \t// packed8B" %}
 6388   size(4);
 6389   ins_encode( enc_std(src, mem) );
 6390   ins_pipe(pipe_class_memory);
 6391 %}
 6392 
 6393 // Store Packed Byte long register to memory
 6394 instruct storeV16(indirect mem, vecX src) %{
 6395   predicate(n->as_StoreVector()->memory_size() == 16);
 6396   match(Set mem (StoreVector mem src));
 6397   ins_cost(MEMORY_REF_COST);
 6398 
 6399   format %{ "STXVD2X     $mem, $src \t// store 16-byte Vector" %}
 6400   size(4);
 6401   ins_encode %{
 6402     __ stxvd2x($src$$VectorSRegister, $mem$$Register);
 6403   %}
 6404   ins_pipe(pipe_class_default);
 6405 %}
 6406 
 6407 // Reinterpret: only one vector size is used, either L or X.
 6408 instruct reinterpretL(iRegLdst dst) %{
 6409   match(Set dst (VectorReinterpret dst));
 6410   ins_cost(0);
 6411   format %{ "reinterpret $dst" %}
 6412   ins_encode( /*empty*/ );
 6413   ins_pipe(pipe_class_empty);
 6414 %}
 6415 
 6416 instruct reinterpretX(vecX dst) %{
 6417   match(Set dst (VectorReinterpret dst));
 6418   ins_cost(0);
 6419   format %{ "reinterpret $dst" %}
 6420   ins_encode( /*empty*/ );
 6421   ins_pipe(pipe_class_empty);
 6422 %}
 6423 
 6424 // Store Compressed Oop
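      // Only matched when the store carries no GC barrier data; stores that need
      // GC barriers are matched by separate, GC-specific rules.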
 6425 instruct storeN(memory dst, iRegN_P2N src) %{
 6426   match(Set dst (StoreN dst src));
 6427   predicate(n->as_Store()->barrier_data() == 0);
 6428   ins_cost(MEMORY_REF_COST);
 6429 
 6430   format %{ "STW     $src, $dst \t// compressed oop" %}
 6431   size(4);
 6432   ins_encode( enc_stw(src, dst) );
 6433   ins_pipe(pipe_class_memory);
 6434 %}
 6435 
 6436 // Store Compressed Klass
 6437 instruct storeNKlass(memory dst, iRegN_P2N src) %{
 6438   match(Set dst (StoreNKlass dst src));
 6439   ins_cost(MEMORY_REF_COST);
 6440 
 6441   format %{ "STW     $src, $dst \t// compressed klass" %}
 6442   size(4);
 6443   ins_encode( enc_stw(src, dst) );
 6444   ins_pipe(pipe_class_memory);
 6445 %}
 6446 
 6447 // Store Pointer
 6448 instruct storeP(memoryAlg4 dst, iRegPsrc src) %{
 6449   match(Set dst (StoreP dst src));
 6450   predicate(n->as_Store()->barrier_data() == 0);
 6451   ins_cost(MEMORY_REF_COST);
 6452 
 6453   format %{ "STD     $src, $dst \t// ptr" %}
 6454   size(4);
 6455   ins_encode( enc_std(src, dst) );
 6456   ins_pipe(pipe_class_memory);
 6457 %}
 6458 
 6459 // Store Float
 6460 instruct storeF(memory mem, regF src) %{
 6461   match(Set mem (StoreF mem src));
 6462   ins_cost(MEMORY_REF_COST);
 6463 
 6464   format %{ "STFS    $src, $mem" %}
 6465   size(4);
 6466   ins_encode( enc_stfs(src, mem) );
 6467   ins_pipe(pipe_class_memory);
 6468 %}
 6469 
 6470 // Store Double
 6471 instruct storeD(memory mem, regD src) %{
 6472   match(Set mem (StoreD mem src));
 6473   ins_cost(MEMORY_REF_COST);
 6474 
 6475   format %{ "STFD    $src, $mem" %}
 6476   size(4);
 6477   ins_encode( enc_stfd(src, mem) );
 6478   ins_pipe(pipe_class_memory);
 6479 %}
 6480 
 6481 // Convert oop pointer into compressed form.
 6482 
 6483 // Nodes for postalloc expand.
 6484 
 6485 // Shift node for expand.
 6486 instruct encodeP_shift(iRegNdst dst, iRegNsrc src) %{
 6487   // The match rule is needed to make it a 'MachTypeNode'!
 6488   match(Set dst (EncodeP src));
 6489   predicate(false);
 6490 
 6491   format %{ "SRDI    $dst, $src, 3 \t// encode" %}
 6492   size(4);
 6493   ins_encode %{
 6494     __ srdi($dst$$Register, $src$$Register, CompressedOops::shift() & 0x3f);
 6495   %}
 6496   ins_pipe(pipe_class_default);
 6497 %}
 6498 
 6499 // Add node for expand.
 6500 instruct encodeP_sub(iRegPdst dst, iRegPdst src) %{
 6501   // The match rule is needed to make it a 'MachTypeNode'!
 6502   match(Set dst (EncodeP src));
 6503   predicate(false);
 6504 
 6505   format %{ "SUB     $dst, $src, oop_base \t// encode" %}
 6506   ins_encode %{
 6507     __ sub_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
 6508   %}
 6509   ins_pipe(pipe_class_default);
 6510 %}
 6511 
 6512 // Conditional sub base.
 6513 instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
 6514   // The match rule is needed to make it a 'MachTypeNode'!
 6515   match(Set dst (EncodeP (Binary crx src1)));
 6516   predicate(false);
 6517 
 6518   format %{ "BEQ     $crx, done\n\t"
 6519             "SUB     $dst, $src1, heapbase \t// encode: subtract base if != nullptr\n"
 6520             "done:" %}
 6521   ins_encode %{
 6522     Label done;
 6523     __ beq($crx$$CondRegister, done);
 6524     __ sub_const_optimized($dst$$Register, $src1$$Register, CompressedOops::base(), R0);
 6525     __ bind(done);
 6526   %}
 6527   ins_pipe(pipe_class_default);
 6528 %}
 6529 
 6530 // Power7 can use the isel instruction.
 6531 instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
 6532   // The match rule is needed to make it a 'MachTypeNode'!
 6533   match(Set dst (EncodeP (Binary crx src1)));
 6534   predicate(false);
 6535 
 6536   format %{ "CMOVE   $dst, $crx eq, 0, $src1 \t// encode: preserve 0" %}
 6537   size(4);
 6538   ins_encode %{
 6539     // This is a Power7 instruction for which no machine description exists.
 6540     __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
 6541   %}
 6542   ins_pipe(pipe_class_default);
 6543 %}
 6544 
 6545 // Disjoint narrow oop base.
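      // With a disjoint base the heap base shares no bits with the shifted narrow oop,
      // so encoding needs no subtraction (a single EXTRDI suffices) and decoding no
      // addition (RLDIMI into a register that holds the base, see decodeN_mergeDisjoint).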
 6546 instruct encodeP_Disjoint(iRegNdst dst, iRegPsrc src) %{
 6547   match(Set dst (EncodeP src));
 6548   predicate(CompressedOops::base_disjoint());
 6549 
 6550   format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with disjoint base" %}
 6551   size(4);
 6552   ins_encode %{
 6553     __ rldicl($dst$$Register, $src$$Register, 64-CompressedOops::shift(), 32);
 6554   %}
 6555   ins_pipe(pipe_class_default);
 6556 %}
 6557 
 6558 // shift != 0, base != 0
 6559 instruct encodeP_Ex(iRegNdst dst, flagsReg crx, iRegPsrc src) %{
 6560   match(Set dst (EncodeP src));
 6561   effect(TEMP crx);
 6562   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
 6563             CompressedOops::shift() != 0 &&
 6564             CompressedOops::base_overlaps());
 6565 
 6566   format %{ "EncodeP $dst, $crx, $src \t// postalloc expanded" %}
 6567   postalloc_expand( postalloc_expand_encode_oop(dst, src, crx));
 6568 %}
 6569 
 6570 // shift != 0, base != 0
 6571 instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
 6572   match(Set dst (EncodeP src));
 6573   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
 6574             CompressedOops::shift() != 0 &&
 6575             CompressedOops::base_overlaps());
 6576 
 6577   format %{ "EncodeP $dst, $src\t// $src != Null, postalloc expanded" %}
 6578   postalloc_expand( postalloc_expand_encode_oop_not_null(dst, src) );
 6579 %}
 6580 
 6581 // shift != 0, base == 0
 6582 // TODO: This is the same as encodeP_shift. Merge!
 6583 instruct encodeP_not_null_base_null(iRegNdst dst, iRegPsrc src) %{
 6584   match(Set dst (EncodeP src));
 6585   predicate(CompressedOops::shift() != 0 &&
 6586             CompressedOops::base() == nullptr);
 6587 
 6588   format %{ "SRDI    $dst, $src, #3 \t// encodeP, $src != nullptr" %}
 6589   size(4);
 6590   ins_encode %{
 6591     __ srdi($dst$$Register, $src$$Register, CompressedOops::shift() & 0x3f);
 6592   %}
 6593   ins_pipe(pipe_class_default);
 6594 %}
 6595 
 6596 // Compressed OOPs with narrow_oop_shift == 0.
 6597 // shift == 0, base == 0
 6598 instruct encodeP_narrow_oop_shift_0(iRegNdst dst, iRegPsrc src) %{
 6599   match(Set dst (EncodeP src));
 6600   predicate(CompressedOops::shift() == 0);
 6601 
 6602   format %{ "MR      $dst, $src \t// Ptr->Narrow" %}
 6603   // variable size, 0 or 4.
 6604   ins_encode %{
 6605     __ mr_if_needed($dst$$Register, $src$$Register);
 6606   %}
 6607   ins_pipe(pipe_class_default);
 6608 %}
 6609 
 6610 // Decode nodes.
 6611 
 6612 // Shift node for expand.
 6613 instruct decodeN_shift(iRegPdst dst, iRegPsrc src) %{
 6614   // The match rule is needed to make it a 'MachTypeNode'!
 6615   match(Set dst (DecodeN src));
 6616   predicate(false);
 6617 
 6618   format %{ "SLDI    $dst, $src, #3 \t// DecodeN" %}
 6619   size(4);
 6620   ins_encode %{
 6621     __ sldi($dst$$Register, $src$$Register, CompressedOops::shift());
 6622   %}
 6623   ins_pipe(pipe_class_default);
 6624 %}
 6625 
 6626 // Add node for expand.
 6627 instruct decodeN_add(iRegPdst dst, iRegPdst src) %{
 6628   // The match rule is needed to make it a 'MachTypeNode'!
 6629   match(Set dst (DecodeN src));
 6630   predicate(false);
 6631 
 6632   format %{ "ADD     $dst, $src, heapbase \t// DecodeN, add oop base" %}
 6633   ins_encode %{
 6634     __ add_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
 6635   %}
 6636   ins_pipe(pipe_class_default);
 6637 %}
 6638 
 6639 // Conditional add base for expand.
 6640 instruct cond_add_base(iRegPdst dst, flagsRegSrc crx, iRegPsrc src) %{
 6641   // The match rule is needed to make it a 'MachTypeNode'!
 6642   // NOTICE that the rule is nonsense - we just have to make sure that:
 6643   //  - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
 6644   //  - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
 6645   match(Set dst (DecodeN (Binary crx src)));
 6646   predicate(false);
 6647 
 6648   format %{ "BEQ     $crx, done\n\t"
 6649             "ADD     $dst, $src, heapbase \t// DecodeN: add oop base if $src != nullptr\n"
 6650             "done:" %}
 6651   ins_encode %{
 6652     Label done;
 6653     __ beq($crx$$CondRegister, done);
 6654     __ add_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
 6655     __ bind(done);
 6656   %}
 6657   ins_pipe(pipe_class_default);
 6658 %}
 6659 
 6660 instruct cond_set_0_ptr(iRegPdst dst, flagsRegSrc crx, iRegPsrc src1) %{
 6661   // The match rule is needed to make it a 'MachTypeNode'!
 6662   // NOTICE that the rule is nonsense - we just have to make sure that:
 6663   //  - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
 6664   //  - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
 6665   match(Set dst (DecodeN (Binary crx src1)));
 6666   predicate(false);
 6667 
 6668   format %{ "CMOVE   $dst, $crx eq, 0, $src1 \t// decode: preserve 0" %}
 6669   size(4);
 6670   ins_encode %{
 6671     // This is a Power7 instruction for which no machine description exists.
 6672     __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
 6673   %}
 6674   ins_pipe(pipe_class_default);
 6675 %}
 6676 
 6677 //  shift != 0, base != 0
 6678 instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
 6679   match(Set dst (DecodeN src));
 6680   predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
 6681              n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
 6682             CompressedOops::shift() != 0 &&
 6683             CompressedOops::base() != nullptr);
 6684   ins_cost(4 * DEFAULT_COST); // Should be more expensive than decodeN_Disjoint_isel_Ex.
 6685   effect(TEMP crx);
 6686 
 6687   format %{ "DecodeN $dst, $src \t// Kills $crx, postalloc expanded" %}
 6688   postalloc_expand( postalloc_expand_decode_oop(dst, src, crx) );
 6689 %}
 6690 
 6691 // shift != 0, base == 0
 6692 instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
 6693   match(Set dst (DecodeN src));
 6694   predicate(CompressedOops::shift() != 0 &&
 6695             CompressedOops::base() == nullptr);
 6696 
 6697   format %{ "SLDI    $dst, $src, #3 \t// DecodeN (zerobased)" %}
 6698   size(4);
 6699   ins_encode %{
 6700     __ sldi($dst$$Register, $src$$Register, CompressedOops::shift());
 6701   %}
 6702   ins_pipe(pipe_class_default);
 6703 %}
 6704 
 6705 // Optimize DecodeN for disjoint base.
 6706 // Shift the narrow oop and OR it into a register that already contains the heap base.
 6707 // Base == dst must hold, and is assured by construction in postalloc_expand.
 6708 instruct decodeN_mergeDisjoint(iRegPdst dst, iRegNsrc src, iRegLsrc base) %{
 6709   match(Set dst (DecodeN src));
 6710   effect(TEMP base);
 6711   predicate(false);
 6712 
 6713   format %{ "RLDIMI  $dst, $src, shift, 32-shift \t// DecodeN (disjoint base)" %}
 6714   size(4);
 6715   ins_encode %{
 6716     __ rldimi($dst$$Register, $src$$Register, CompressedOops::shift(), 32-CompressedOops::shift());
 6717   %}
 6718   ins_pipe(pipe_class_default);
 6719 %}
 6720 
 6721 // Optimize DecodeN for disjoint base.
 6722 // This node requires only one cycle on the critical path.
 6723 // We must postalloc_expand because we cannot express use_def effects where
 6724 // the used register is of type L and the def'ed register is of type P.
 6725 instruct decodeN_Disjoint_notNull_Ex(iRegPdst dst, iRegNsrc src) %{
 6726   match(Set dst (DecodeN src));
 6727   effect(TEMP_DEF dst);
 6728   predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
 6729              n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
 6730             CompressedOops::base_disjoint());
 6731   ins_cost(DEFAULT_COST);
 6732 
 6733   format %{ "MOV     $dst, heapbase \t\n"
 6734             "RLDIMI  $dst, $src, shift, 32-shift \t// decode with disjoint base" %}
 6735   postalloc_expand %{
 6736     loadBaseNode *n1 = new loadBaseNode();
 6737     n1->add_req(nullptr);
 6738     n1->_opnds[0] = op_dst;
 6739 
 6740     decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
 6741     n2->add_req(n_region, n_src, n1);
 6742     n2->_opnds[0] = op_dst;
 6743     n2->_opnds[1] = op_src;
 6744     n2->_opnds[2] = op_dst;
 6745     n2->_bottom_type = _bottom_type;
 6746 
 6747     assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
 6748     ra_->set_oop(n2, true);
 6749 
 6750     ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6751     ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6752 
 6753     nodes->push(n1);
 6754     nodes->push(n2);
 6755   %}
 6756 %}
 6757 
 6758 instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
 6759   match(Set dst (DecodeN src));
 6760   effect(TEMP_DEF dst, TEMP crx);
 6761   predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
 6762              n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
 6763             CompressedOops::base_disjoint() && VM_Version::has_isel());
 6764   ins_cost(3 * DEFAULT_COST);
 6765 
 6766   format %{ "DecodeN  $dst, $src \t// decode with disjoint base using isel" %}
 6767   postalloc_expand %{
 6768     loadBaseNode *n1 = new loadBaseNode();
 6769     n1->add_req(nullptr);
 6770     n1->_opnds[0] = op_dst;
 6771 
 6772     cmpN_reg_imm0Node *n_compare  = new cmpN_reg_imm0Node();
 6773     n_compare->add_req(n_region, n_src);
 6774     n_compare->_opnds[0] = op_crx;
 6775     n_compare->_opnds[1] = op_src;
 6776     n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);
 6777 
 6778     decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
 6779     n2->add_req(n_region, n_src, n1);
 6780     n2->_opnds[0] = op_dst;
 6781     n2->_opnds[1] = op_src;
 6782     n2->_opnds[2] = op_dst;
 6783     n2->_bottom_type = _bottom_type;
 6784 
 6785     cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode();
 6786     n_cond_set->add_req(n_region, n_compare, n2);
 6787     n_cond_set->_opnds[0] = op_dst;
 6788     n_cond_set->_opnds[1] = op_crx;
 6789     n_cond_set->_opnds[2] = op_dst;
 6790     n_cond_set->_bottom_type = _bottom_type;
 6791 
 6792     assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
 6793     ra_->set_oop(n_cond_set, true);
 6794 
 6795     ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6796     ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
 6797     ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6798     ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6799 
 6800     nodes->push(n1);
 6801     nodes->push(n_compare);
 6802     nodes->push(n2);
 6803     nodes->push(n_cond_set);
 6804   %}
 6805 %}
 6806 
 6807 // src != 0, shift != 0, base != 0
 6808 instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
 6809   match(Set dst (DecodeN src));
 6810   predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
 6811              n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
 6812             CompressedOops::shift() != 0 &&
 6813             CompressedOops::base() != nullptr);
 6814   ins_cost(2 * DEFAULT_COST);
 6815 
 6816   format %{ "DecodeN $dst, $src \t// $src != nullptr, postalloc expanded" %}
 6817   postalloc_expand( postalloc_expand_decode_oop_not_null(dst, src));
 6818 %}
 6819 
 6820 // Compressed OOPs with narrow_oop_shift == 0.
 6821 instruct decodeN_unscaled(iRegPdst dst, iRegNsrc src) %{
 6822   match(Set dst (DecodeN src));
 6823   predicate(CompressedOops::shift() == 0);
 6824   ins_cost(DEFAULT_COST);
 6825 
 6826   format %{ "MR      $dst, $src \t// DecodeN (unscaled)" %}
 6827   // variable size, 0 or 4.
 6828   ins_encode %{
 6829     __ mr_if_needed($dst$$Register, $src$$Register);
 6830   %}
 6831   ins_pipe(pipe_class_default);
 6832 %}
 6833 
 6834 // Convert compressed oop into int for vectors alignment masking.
 6835 instruct decodeN2I_unscaled(iRegIdst dst, iRegNsrc src) %{
 6836   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 6837   predicate(CompressedOops::shift() == 0);
 6838   ins_cost(DEFAULT_COST);
 6839 
 6840   format %{ "MR      $dst, $src \t// (int)DecodeN (unscaled)" %}
 6841   // variable size, 0 or 4.
 6842   ins_encode %{
 6843     __ mr_if_needed($dst$$Register, $src$$Register);
 6844   %}
 6845   ins_pipe(pipe_class_default);
 6846 %}
 6847 
 6848 // Convert klass pointer into compressed form.
 6849 
 6850 // Nodes for postalloc expand.
 6851 
 6852 // Shift node for expand.
 6853 instruct encodePKlass_shift(iRegNdst dst, iRegNsrc src) %{
 6854   // The match rule is needed to make it a 'MachTypeNode'!
 6855   match(Set dst (EncodePKlass src));
 6856   predicate(false);
 6857 
 6858   format %{ "SRDI    $dst, $src, 3 \t// encode" %}
 6859   size(4);
 6860   ins_encode %{
 6861     __ srdi($dst$$Register, $src$$Register, CompressedKlassPointers::shift());
 6862   %}
 6863   ins_pipe(pipe_class_default);
 6864 %}
 6865 
 6866 // Add node for expand.
 6867 instruct encodePKlass_sub_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
 6868   // The match rule is needed to make it a 'MachTypeNode'!
 6869   match(Set dst (EncodePKlass (Binary base src)));
 6870   predicate(false);
 6871 
 6872   format %{ "SUB     $dst, $base, $src \t// encode" %}
 6873   size(4);
 6874   ins_encode %{
 6875     __ subf($dst$$Register, $base$$Register, $src$$Register);
 6876   %}
 6877   ins_pipe(pipe_class_default);
 6878 %}
 6879 
 6880 // Disjoint narrow oop base.
 6881 instruct encodePKlass_Disjoint(iRegNdst dst, iRegPsrc src) %{
 6882   match(Set dst (EncodePKlass src));
 6883   predicate(false /* TODO: PPC port CompressedKlassPointers::base_disjoint()*/);
 6884 
 6885   format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with disjoint base" %}
 6886   size(4);
 6887   ins_encode %{
 6888     __ rldicl($dst$$Register, $src$$Register, 64-CompressedKlassPointers::shift(), 32);
 6889   %}
 6890   ins_pipe(pipe_class_default);
 6891 %}
 6892 
 6893 // shift != 0, base != 0
 6894 instruct encodePKlass_not_null_Ex(iRegNdst dst, iRegLsrc base, iRegPsrc src) %{
 6895   match(Set dst (EncodePKlass (Binary base src)));
 6896   predicate(false);
 6897 
 6898   format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
 6899   postalloc_expand %{
 6900     encodePKlass_sub_baseNode *n1 = new encodePKlass_sub_baseNode();
 6901     n1->add_req(n_region, n_base, n_src);
 6902     n1->_opnds[0] = op_dst;
 6903     n1->_opnds[1] = op_base;
 6904     n1->_opnds[2] = op_src;
 6905     n1->_bottom_type = _bottom_type;
 6906 
 6907     encodePKlass_shiftNode *n2 = new encodePKlass_shiftNode();
 6908     n2->add_req(n_region, n1);
 6909     n2->_opnds[0] = op_dst;
 6910     n2->_opnds[1] = op_dst;
 6911     n2->_bottom_type = _bottom_type;
 6912     ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6913     ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6914 
 6915     nodes->push(n1);
 6916     nodes->push(n2);
 6917   %}
 6918 %}
 6919 
 6920 // shift != 0, base != 0
 6921 instruct encodePKlass_not_null_ExEx(iRegNdst dst, iRegPsrc src) %{
 6922   match(Set dst (EncodePKlass src));
 6923   //predicate(CompressedKlassPointers::shift() != 0 &&
 6924   //          true /* TODO: PPC port CompressedKlassPointers::base_overlaps()*/);
 6925 
 6926   //format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
 6927   ins_cost(DEFAULT_COST*2);  // Don't count constant.
 6928   expand %{
 6929     immL baseImm %{ (jlong)(intptr_t)CompressedKlassPointers::base() %}
 6930     iRegLdst base;
 6931     loadConL_Ex(base, baseImm);
 6932     encodePKlass_not_null_Ex(dst, base, src);
 6933   %}
 6934 %}
 6935 
 6936 // Decode nodes.
 6937 
 6938 // Shift node for expand.
 6939 instruct decodeNKlass_shift(iRegPdst dst, iRegPsrc src) %{
 6940   // The match rule is needed to make it a 'MachTypeNode'!
 6941   match(Set dst (DecodeNKlass src));
 6942   predicate(false);
 6943 
 6944   format %{ "SLDI    $dst, $src, #3 \t// DecodeNKlass" %}
 6945   size(4);
 6946   ins_encode %{
 6947     __ sldi($dst$$Register, $src$$Register, CompressedKlassPointers::shift());
 6948   %}
 6949   ins_pipe(pipe_class_default);
 6950 %}
 6951 
 6952 // Add node for expand.
 6953 
 6954 instruct decodeNKlass_add_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
 6955   // The match rule is needed to make it a 'MachTypeNode'!
 6956   match(Set dst (DecodeNKlass (Binary base src)));
 6957   predicate(false);
 6958 
 6959   format %{ "ADD     $dst, $base, $src \t// DecodeNKlass, add klass base" %}
 6960   size(4);
 6961   ins_encode %{
 6962     __ add($dst$$Register, $base$$Register, $src$$Register);
 6963   %}
 6964   ins_pipe(pipe_class_default);
 6965 %}
 6966 
 6967 // src != 0, shift != 0, base != 0
 6968 instruct decodeNKlass_notNull_addBase_Ex(iRegPdst dst, iRegLsrc base, iRegNsrc src) %{
 6969   match(Set dst (DecodeNKlass (Binary base src)));
 6970   //effect(kill src); // We need a register for the immediate result after shifting.
 6971   predicate(false);
 6972 
 6973   format %{ "DecodeNKlass $dst =  $base + ($src << 3) \t// $src != nullptr, postalloc expanded" %}
 6974   postalloc_expand %{
 6975     decodeNKlass_add_baseNode *n1 = new decodeNKlass_add_baseNode();
 6976     n1->add_req(n_region, n_base, n_src);
 6977     n1->_opnds[0] = op_dst;
 6978     n1->_opnds[1] = op_base;
 6979     n1->_opnds[2] = op_src;
 6980     n1->_bottom_type = _bottom_type;
 6981 
 6982     decodeNKlass_shiftNode *n2 = new decodeNKlass_shiftNode();
 6983     n2->add_req(n_region, n1);
 6984     n2->_opnds[0] = op_dst;
 6985     n2->_opnds[1] = op_dst;
 6986     n2->_bottom_type = _bottom_type;
 6987 
 6988     ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6989     ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 6990 
 6991     nodes->push(n1);
 6992     nodes->push(n2);
 6993   %}
 6994 %}
 6995 
 6996 // src != 0, shift != 0, base != 0
 6997 instruct decodeNKlass_notNull_addBase_ExEx(iRegPdst dst, iRegNsrc src) %{
 6998   match(Set dst (DecodeNKlass src));
 6999   // predicate(CompressedKlassPointers::shift() != 0 &&
 7000   //           CompressedKlassPointers::base() != 0);
 7001 
 7002   //format %{ "DecodeNKlass $dst, $src \t// $src != nullptr, expanded" %}
 7003 
 7004   ins_cost(DEFAULT_COST*2);  // Don't count constant.
 7005   expand %{
 7006     // We add first, then shift. This way we get along with one register less,
 7007     // but we have to load the base pre-shifted.
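          // This relies on base + (narrow << shift) == ((base >> shift) + narrow) << shift,
          // which holds because the klass base is aligned to at least 1 << shift.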
 7008     immL baseImm %{ (jlong)((intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift()) %}
 7009     iRegLdst base;
 7010     loadConL_Ex(base, baseImm);
 7011     decodeNKlass_notNull_addBase_Ex(dst, base, src);
 7012   %}
 7013 %}
 7014 
 7015 //----------MemBar Instructions-----------------------------------------------
 7016 // Memory barrier flavors
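      // On PPC64 the acquire/release flavors typically expand to lwsync and
      // MEMBAR-volatile to a full (hw)sync via MacroAssembler::acquire()/release()/fence();
      // MEMBAR-CPUOrder needs no code at all.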
 7017 
 7018 instruct membar_acquire() %{
 7019   match(LoadFence);
 7020   ins_cost(4*MEMORY_REF_COST);
 7021 
 7022   format %{ "MEMBAR-acquire" %}
 7023   size(4);
 7024   ins_encode %{
 7025     __ acquire();
 7026   %}
 7027   ins_pipe(pipe_class_default);
 7028 %}
 7029 
 7030 instruct unnecessary_membar_acquire() %{
 7031   match(MemBarAcquire);
 7032   ins_cost(0);
 7033 
 7034   format %{ " -- \t// redundant MEMBAR-acquire - empty" %}
 7035   size(0);
 7036   ins_encode( /*empty*/ );
 7037   ins_pipe(pipe_class_default);
 7038 %}
 7039 
 7040 instruct membar_acquire_lock() %{
 7041   match(MemBarAcquireLock);
 7042   ins_cost(0);
 7043 
 7044   format %{ " -- \t// redundant MEMBAR-acquire - empty (acquire as part of CAS in prior FastLock)" %}
 7045   size(0);
 7046   ins_encode( /*empty*/ );
 7047   ins_pipe(pipe_class_default);
 7048 %}
 7049 
 7050 instruct membar_release() %{
 7051   match(MemBarRelease);
 7052   match(StoreFence);
 7053   ins_cost(4*MEMORY_REF_COST);
 7054 
 7055   format %{ "MEMBAR-release" %}
 7056   size(4);
 7057   ins_encode %{
 7058     __ release();
 7059   %}
 7060   ins_pipe(pipe_class_default);
 7061 %}
 7062 
 7063 instruct membar_storestore() %{
 7064   match(MemBarStoreStore);
 7065   match(StoreStoreFence);
 7066   ins_cost(4*MEMORY_REF_COST);
 7067 
 7068   format %{ "MEMBAR-store-store" %}
 7069   size(4);
 7070   ins_encode %{
 7071     __ membar(Assembler::StoreStore);
 7072   %}
 7073   ins_pipe(pipe_class_default);
 7074 %}
 7075 
 7076 instruct membar_release_lock() %{
 7077   match(MemBarReleaseLock);
 7078   ins_cost(0);
 7079 
 7080   format %{ " -- \t// redundant MEMBAR-release - empty (release in FastUnlock)" %}
 7081   size(0);
 7082   ins_encode( /*empty*/ );
 7083   ins_pipe(pipe_class_default);
 7084 %}
 7085 
 7086 instruct membar_volatile() %{
 7087   match(MemBarVolatile);
 7088   ins_cost(4*MEMORY_REF_COST);
 7089 
 7090   format %{ "MEMBAR-volatile" %}
 7091   size(4);
 7092   ins_encode %{
 7093     __ fence();
 7094   %}
 7095   ins_pipe(pipe_class_default);
 7096 %}
 7097 
 7098 // This optimization is wrong on PPC. The following pattern is not supported:
 7099 //  MemBarVolatile
 7100 //   ^        ^
 7101 //   |        |
 7102 //  CtrlProj MemProj
 7103 //   ^        ^
 7104 //   |        |
 7105 //   |       Load
 7106 //   |
 7107 //  MemBarVolatile
 7108 //
 7109 //  The first MemBarVolatile could get optimized out! According to
 7110 //  Vladimir, this pattern cannot occur on Oracle platforms.
 7111 //  However, it does occur on PPC64 (because of membars in
 7112 //  inline_unsafe_load_store).
 7113 //
 7114 // Add this node again if we found a good solution for inline_unsafe_load_store().
 7115 // Don't forget to look at the implementation of post_store_load_barrier again,
 7116 // we did other fixes in that method.
 7117 //instruct unnecessary_membar_volatile() %{
 7118 //  match(MemBarVolatile);
 7119 //  predicate(Matcher::post_store_load_barrier(n));
 7120 //  ins_cost(0);
 7121 //
 7122 //  format %{ " -- \t// redundant MEMBAR-volatile - empty" %}
 7123 //  size(0);
 7124 //  ins_encode( /*empty*/ );
 7125 //  ins_pipe(pipe_class_default);
 7126 //%}
 7127 
 7128 instruct membar_CPUOrder() %{
 7129   match(MemBarCPUOrder);
 7130   ins_cost(0);
 7131 
 7132   format %{ " -- \t// MEMBAR-CPUOrder - empty: PPC64 processors are self-consistent." %}
 7133   size(0);
 7134   ins_encode( /*empty*/ );
 7135   ins_pipe(pipe_class_default);
 7136 %}
 7137 
 7138 //----------Conditional Move---------------------------------------------------
 7139 
 7140 // Cmove using isel.
 7141 instruct cmovI_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, iRegIsrc src) %{
 7142   match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
 7143   predicate(VM_Version::has_isel());
 7144   ins_cost(DEFAULT_COST);
 7145 
 7146   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7147   size(4);
 7148   ins_encode %{
 7149     // This is a Power7 instruction for which no machine description
 7150     // exists. Anyway, the scheduler should be off on Power7.
 7151     int cc        = $cmp$$cmpcode;
 7152     __ isel($dst$$Register, $crx$$CondRegister,
 7153             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
 7154   %}
 7155   ins_pipe(pipe_class_default);
 7156 %}
 7157 
 7158 instruct cmovI_reg(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, iRegIsrc src) %{
 7159   match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
 7160   predicate(!VM_Version::has_isel());
 7161   ins_cost(DEFAULT_COST+BRANCH_COST);
 7162 
 7163   ins_variable_size_depending_on_alignment(true);
 7164 
 7165   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7166   // Worst case is branch + move + stop, no stop without scheduler
 7167   size(8);
 7168   ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
 7169   ins_pipe(pipe_class_default);
 7170 %}
 7171 
 7172 instruct cmovI_imm(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, immI16 src) %{
 7173   match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
 7174   ins_cost(DEFAULT_COST+BRANCH_COST);
 7175 
 7176   ins_variable_size_depending_on_alignment(true);
 7177 
 7178   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7179   // Worst case is branch + move + stop, no stop without scheduler
 7180   size(8);
 7181   ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
 7182   ins_pipe(pipe_class_default);
 7183 %}
 7184 
 7185 // Cmove using isel.
 7186 instruct cmovL_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, iRegLsrc src) %{
 7187   match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
 7188   predicate(VM_Version::has_isel());
 7189   ins_cost(DEFAULT_COST);
 7190 
 7191   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7192   size(4);
 7193   ins_encode %{
 7194     // This is a Power7 instruction for which no machine description
 7195     // exists. Anyway, the scheduler should be off on Power7.
 7196     int cc        = $cmp$$cmpcode;
 7197     __ isel($dst$$Register, $crx$$CondRegister,
 7198             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
 7199   %}
 7200   ins_pipe(pipe_class_default);
 7201 %}
 7202 
 7203 instruct cmovL_reg(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, iRegLsrc src) %{
 7204   match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
 7205   predicate(!VM_Version::has_isel());
 7206   ins_cost(DEFAULT_COST+BRANCH_COST);
 7207 
 7208   ins_variable_size_depending_on_alignment(true);
 7209 
 7210   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7211   // Worst case is branch + move + stop, no stop without scheduler.
 7212   size(8);
 7213   ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
 7214   ins_pipe(pipe_class_default);
 7215 %}
 7216 
 7217 instruct cmovL_imm(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, immL16 src) %{
 7218   match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
 7219   ins_cost(DEFAULT_COST+BRANCH_COST);
 7220 
 7221   ins_variable_size_depending_on_alignment(true);
 7222 
 7223   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7224   // Worst case is branch + move + stop, no stop without scheduler.
 7225   size(8);
 7226   ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
 7227   ins_pipe(pipe_class_default);
 7228 %}
 7229 
 7230 // Cmove using isel.
 7231 instruct cmovN_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, iRegNsrc src) %{
 7232   match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
 7233   predicate(VM_Version::has_isel());
 7234   ins_cost(DEFAULT_COST);
 7235 
 7236   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7237   size(4);
 7238   ins_encode %{
 7239     // This is a Power7 instruction for which no machine description
 7240     // exists. Anyway, the scheduler should be off on Power7.
 7241     int cc        = $cmp$$cmpcode;
 7242     __ isel($dst$$Register, $crx$$CondRegister,
 7243             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
 7244   %}
 7245   ins_pipe(pipe_class_default);
 7246 %}
 7247 
 7248 // Conditional move for RegN. Only cmov(reg, reg).
 7249 instruct cmovN_reg(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, iRegNsrc src) %{
 7250   match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
 7251   predicate(!VM_Version::has_isel());
 7252   ins_cost(DEFAULT_COST+BRANCH_COST);
 7253 
 7254   ins_variable_size_depending_on_alignment(true);
 7255 
 7256   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7257   // Worst case is branch + move + stop, no stop without scheduler.
 7258   size(8);
 7259   ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
 7260   ins_pipe(pipe_class_default);
 7261 %}
 7262 
 7263 instruct cmovN_imm(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, immN_0 src) %{
 7264   match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
 7265   ins_cost(DEFAULT_COST+BRANCH_COST);
 7266 
 7267   ins_variable_size_depending_on_alignment(true);
 7268 
 7269   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7270   // Worst case is branch + move + stop, no stop without scheduler.
 7271   size(8);
 7272   ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
 7273   ins_pipe(pipe_class_default);
 7274 %}
 7275 
 7276 // Cmove using isel.
 7277 instruct cmovP_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, iRegPsrc src) %{
 7278   match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
 7279   predicate(VM_Version::has_isel());
 7280   ins_cost(DEFAULT_COST);
 7281 
 7282   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7283   size(4);
 7284   ins_encode %{
 7285     // This is a Power7 instruction for which no machine description
 7286     // exists. Anyway, the scheduler should be off on Power7.
 7287     int cc        = $cmp$$cmpcode;
 7288     __ isel($dst$$Register, $crx$$CondRegister,
 7289             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
 7290   %}
 7291   ins_pipe(pipe_class_default);
 7292 %}
 7293 
 7294 instruct cmovP_reg(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, iRegP_N2P src) %{
 7295   match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
 7296   predicate(!VM_Version::has_isel());
 7297   ins_cost(DEFAULT_COST+BRANCH_COST);
 7298 
 7299   ins_variable_size_depending_on_alignment(true);
 7300 
 7301   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7302   // Worst case is branch + move + stop, no stop without scheduler.
 7303   size(8);
 7304   ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
 7305   ins_pipe(pipe_class_default);
 7306 %}
 7307 
 7308 instruct cmovP_imm(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, immP_0 src) %{
 7309   match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
 7310   ins_cost(DEFAULT_COST+BRANCH_COST);
 7311 
 7312   ins_variable_size_depending_on_alignment(true);
 7313 
 7314   format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
 7315   // Worst case is branch + move + stop, no stop without scheduler.
 7316   size(8);
 7317   ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
 7318   ins_pipe(pipe_class_default);
 7319 %}
 7320 
 7321 instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
 7322   match(Set dst (CMoveF (Binary cmp crx) (Binary dst src)));
 7323   ins_cost(DEFAULT_COST+BRANCH_COST);
 7324 
 7325   ins_variable_size_depending_on_alignment(true);
 7326 
 7327   format %{ "CMOVEF  $cmp, $crx, $dst, $src\n\t" %}
 7328   // Worst case is branch + move + stop, no stop without scheduler.
 7329   size(8);
 7330   ins_encode %{
 7331     Label done;
 7332     assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
 7333     // Branch if not (cmp crx).
 7334     __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
 7335     __ fmr($dst$$FloatRegister, $src$$FloatRegister);
 7336     __ bind(done);
 7337   %}
 7338   ins_pipe(pipe_class_default);
 7339 %}
 7340 
 7341 instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
 7342   match(Set dst (CMoveD (Binary cmp crx) (Binary dst src)));
 7343   ins_cost(DEFAULT_COST+BRANCH_COST);
 7344 
 7345   ins_variable_size_depending_on_alignment(true);
 7346 
 7347   format %{ "CMOVEF  $cmp, $crx, $dst, $src\n\t" %}
 7348   // Worst case is branch + move + stop, no stop without scheduler.
 7349   size(8);
 7350   ins_encode %{
 7351     Label done;
 7352     assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
 7353     // Branch if not (cmp crx).
 7354     __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
 7355     __ fmr($dst$$FloatRegister, $src$$FloatRegister);
 7356     __ bind(done);
 7357   %}
 7358   ins_pipe(pipe_class_default);
 7359 %}
 7360 
 7361 //----------Compare-And-Swap---------------------------------------------------
 7362 
 7363 // CompareAndSwap{P,I,L} have more than one output; therefore "CmpI
 7364 // (CompareAndSwap ...)" or "If (CmpI (CompareAndSwap ..))" cannot be
 7365 // matched.
 7366 
 7367 // Strong versions:
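      // The strong versions append a trailing isync or full sync (depending on
      // support_IRIW_for_not_multiple_copy_atomic_cpu) so the CAS also provides the
      // required acquire-style ordering.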
 7368 
 7369 instruct compareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7370   match(Set res (CompareAndSwapB mem_ptr (Binary src1 src2)));
 7371   predicate(VM_Version::has_lqarx());
 7372   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7373   format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
 7374   ins_encode %{
 7375     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7376     __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7377                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7378                 $res$$Register, nullptr, true);
 7379     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7380       __ isync();
 7381     } else {
 7382       __ sync();
 7383     }
 7384   %}
 7385   ins_pipe(pipe_class_default);
 7386 %}
 7387 
 7388 instruct compareAndSwapB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, iRegIdst tmp2, flagsRegCR0 cr0) %{
 7389   match(Set res (CompareAndSwapB mem_ptr (Binary src1 src2)));
 7390   predicate(!VM_Version::has_lqarx());
 7391   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
 7392   format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
 7393   ins_encode %{
 7394     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7395     __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
 7396                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7397                 $res$$Register, nullptr, true);
 7398     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7399       __ isync();
 7400     } else {
 7401       __ sync();
 7402     }
 7403   %}
 7404   ins_pipe(pipe_class_default);
 7405 %}
 7406 
 7407 instruct compareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7408   match(Set res (CompareAndSwapS mem_ptr (Binary src1 src2)));
 7409   predicate(VM_Version::has_lqarx());
 7410   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7411   format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
 7412   ins_encode %{
 7413     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7414     __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7415                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7416                 $res$$Register, nullptr, true);
 7417     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7418       __ isync();
 7419     } else {
 7420       __ sync();
 7421     }
 7422   %}
 7423   ins_pipe(pipe_class_default);
 7424 %}
 7425 
 7426 instruct compareAndSwapS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, iRegIdst tmp2, flagsRegCR0 cr0) %{
 7427   match(Set res (CompareAndSwapS mem_ptr (Binary src1 src2)));
 7428   predicate(!VM_Version::has_lqarx());
 7429   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
 7430   format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
 7431   ins_encode %{
 7432     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7433     __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
 7434                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7435                 $res$$Register, nullptr, true);
 7436     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7437       __ isync();
 7438     } else {
 7439       __ sync();
 7440     }
 7441   %}
 7442   ins_pipe(pipe_class_default);
 7443 %}
 7444 
 7445 instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7446   match(Set res (CompareAndSwapI mem_ptr (Binary src1 src2)));
 7447   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7448   format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
 7449   ins_encode %{
 7450     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7451     __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7452                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7453                 $res$$Register, nullptr, true);
 7454     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7455       __ isync();
 7456     } else {
 7457       __ sync();
 7458     }
 7459   %}
 7460   ins_pipe(pipe_class_default);
 7461 %}
 7462 
 7463 instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
 7464   match(Set res (CompareAndSwapN mem_ptr (Binary src1 src2)));
 7465   predicate(n->as_LoadStore()->barrier_data() == 0);
 7466   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7467   format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
 7468   ins_encode %{
 7469     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7470     __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7471                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7472                 $res$$Register, nullptr, true);
 7473     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7474       __ isync();
 7475     } else {
 7476       __ sync();
 7477     }
 7478   %}
 7479   ins_pipe(pipe_class_default);
 7480 %}
 7481 
 7482 instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
 7483   match(Set res (CompareAndSwapL mem_ptr (Binary src1 src2)));
 7484   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7485   format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
 7486   ins_encode %{
 7487     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7488     __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7489                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7490                 $res$$Register, nullptr, true);
 7491     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7492       __ isync();
 7493     } else {
 7494       __ sync();
 7495     }
 7496   %}
 7497   ins_pipe(pipe_class_default);
 7498 %}
 7499 
 7500 instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
 7501   match(Set res (CompareAndSwapP mem_ptr (Binary src1 src2)));
 7502   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7503   predicate(n->as_LoadStore()->barrier_data() == 0);
 7504   format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
 7505   ins_encode %{
 7506     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7507     __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7508                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7509                 $res$$Register, nullptr, true);
 7510     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7511       __ isync();
 7512     } else {
 7513       __ sync();
 7514     }
 7515   %}
 7516   ins_pipe(pipe_class_default);
 7517 %}
 7518 
 7519 // Weak versions:
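      // Weak CAS may fail spuriously and only produces a success flag; the relaxed variants
      // below therefore omit the trailing acquire barrier, while the '_acq' variants pass
      // MemBarAcq or MemBarFenceAfter to the assembler instead.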
 7520 
 7521 instruct weakCompareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7522   match(Set res (WeakCompareAndSwapB mem_ptr (Binary src1 src2)));
 7523   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && VM_Version::has_lqarx());
 7524   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7525   format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
 7526   ins_encode %{
 7527     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7528     __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7529                 MacroAssembler::MemBarNone,
 7530                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7531   %}
 7532   ins_pipe(pipe_class_default);
 7533 %}
 7534 
 7535 instruct weakCompareAndSwapB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, iRegIdst tmp2, flagsRegCR0 cr0) %{
 7536   match(Set res (WeakCompareAndSwapB mem_ptr (Binary src1 src2)));
 7537   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && !VM_Version::has_lqarx());
 7538   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
 7539   format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
 7540   ins_encode %{
 7541     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7542     __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
 7543                 MacroAssembler::MemBarNone,
 7544                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7545   %}
 7546   ins_pipe(pipe_class_default);
 7547 %}
 7548 
 7549 instruct weakCompareAndSwapB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7550   match(Set res (WeakCompareAndSwapB mem_ptr (Binary src1 src2)));
 7551   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && VM_Version::has_lqarx());
 7552   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7553   format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7554   ins_encode %{
 7555     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7556     __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7557                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7558                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7559   %}
 7560   ins_pipe(pipe_class_default);
 7561 %}
 7562 
 7563 instruct weakCompareAndSwapB4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, iRegIdst tmp2, flagsRegCR0 cr0) %{
 7564   match(Set res (WeakCompareAndSwapB mem_ptr (Binary src1 src2)));
 7565   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && !VM_Version::has_lqarx());
 7566   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
 7567   format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7568   ins_encode %{
 7569     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7570     __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
 7571                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7572                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7573   %}
 7574   ins_pipe(pipe_class_default);
 7575 %}
 7576 
 7577 instruct weakCompareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7578   match(Set res (WeakCompareAndSwapS mem_ptr (Binary src1 src2)));
 7579   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && VM_Version::has_lqarx());
 7580   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7581   format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
 7582   ins_encode %{
 7583     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7584     __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7585                 MacroAssembler::MemBarNone,
 7586                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7587   %}
 7588   ins_pipe(pipe_class_default);
 7589 %}
 7590 
 7591 instruct weakCompareAndSwapS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, iRegIdst tmp2, flagsRegCR0 cr0) %{
 7592   match(Set res (WeakCompareAndSwapS mem_ptr (Binary src1 src2)));
 7593   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && !VM_Version::has_lqarx());
 7594   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
 7595   format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
 7596   ins_encode %{
 7597     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7598     __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
 7599                 MacroAssembler::MemBarNone,
 7600                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7601   %}
 7602   ins_pipe(pipe_class_default);
 7603 %}
 7604 
 7605 instruct weakCompareAndSwapS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7606   match(Set res (WeakCompareAndSwapS mem_ptr (Binary src1 src2)));
 7607   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && VM_Version::has_lqarx());
 7608   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7609   format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7610   ins_encode %{
 7611     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7612     __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7613                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7614                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7615   %}
 7616   ins_pipe(pipe_class_default);
 7617 %}
 7618 
 7619 instruct weakCompareAndSwapS4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, iRegIdst tmp2, flagsRegCR0 cr0) %{
 7620   match(Set res (WeakCompareAndSwapS mem_ptr (Binary src1 src2)));
 7621   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && !VM_Version::has_lqarx());
 7622   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
 7623   format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7624   ins_encode %{
 7625     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7626     __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
 7627                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7628                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7629   %}
 7630   ins_pipe(pipe_class_default);
 7631 %}
 7632 
 7633 instruct weakCompareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7634   match(Set res (WeakCompareAndSwapI mem_ptr (Binary src1 src2)));
 7635   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
 7636   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7637   format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
 7638   ins_encode %{
 7639     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7640     __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7641                 MacroAssembler::MemBarNone,
 7642                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7643   %}
 7644   ins_pipe(pipe_class_default);
 7645 %}
 7646 
 7647 instruct weakCompareAndSwapI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7648   match(Set res (WeakCompareAndSwapI mem_ptr (Binary src1 src2)));
 7649   predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
 7650   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7651   format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7652   ins_encode %{
 7653     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7654     // Acquire is only needed in the successful case. A weak node is allowed to report failure in
 7655     // additional rare cases, and the value is never passed to the caller.
 7656     __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7657                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7658                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7659   %}
 7660   ins_pipe(pipe_class_default);
 7661 %}
 7662 
 7663 instruct weakCompareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
 7664   match(Set res (WeakCompareAndSwapN mem_ptr (Binary src1 src2)));
 7665   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && n->as_LoadStore()->barrier_data() == 0);
 7666   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7667   format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
 7668   ins_encode %{
 7669     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7670     __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7671                 MacroAssembler::MemBarNone,
 7672                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7673   %}
 7674   ins_pipe(pipe_class_default);
 7675 %}
 7676 
 7677 instruct weakCompareAndSwapN_acq_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
 7678   match(Set res (WeakCompareAndSwapN mem_ptr (Binary src1 src2)));
 7679   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
 7680   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7681   format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7682   ins_encode %{
 7683     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7684     // Acquire is only needed in the successful case. A weak node is allowed to report failure in
 7685     // additional rare cases, and the value is never passed to the caller.
 7686     __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7687                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7688                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7689   %}
 7690   ins_pipe(pipe_class_default);
 7691 %}
 7692 
 7693 instruct weakCompareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
 7694   match(Set res (WeakCompareAndSwapL mem_ptr (Binary src1 src2)));
 7695   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
 7696   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7697   format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
 7698   ins_encode %{
 7699     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7700     // A weak node is allowed to report failure in additional rare cases, and the value is never passed to the caller.
 7701     __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7702                 MacroAssembler::MemBarNone,
 7703                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7704   %}
 7705   ins_pipe(pipe_class_default);
 7706 %}
 7707 
 7708 instruct weakCompareAndSwapL_acq_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
 7709   match(Set res (WeakCompareAndSwapL mem_ptr (Binary src1 src2)));
 7710   predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
 7711   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7712   format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool" %}
 7713   ins_encode %{
 7714     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7715     // Acquire is only needed in the successful case. A weak node is allowed to report failure in
 7716     // additional rare cases, and the value is never passed to the caller.
 7717     __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7718                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7719                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7720   %}
 7721   ins_pipe(pipe_class_default);
 7722 %}
 7723 
 7724 instruct weakCompareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
 7725   match(Set res (WeakCompareAndSwapP mem_ptr (Binary src1 src2)));
 7726   predicate((((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
 7727   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7728   format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
 7729   ins_encode %{
 7730     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7731     __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7732                 MacroAssembler::MemBarNone,
 7733                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7734   %}
 7735   ins_pipe(pipe_class_default);
 7736 %}
 7737 
 7738 instruct weakCompareAndSwapP_acq_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
 7739   match(Set res (WeakCompareAndSwapP mem_ptr (Binary src1 src2)));
 7740   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
 7741   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
 7742   format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
 7743   ins_encode %{
 7744     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7745     // Acquire is only needed in the successful case. A weak node is allowed to report failure in
 7746     // additional rare cases, and the value is never passed to the caller.
 7747     __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7748                 support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
 7749                 MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
 7750   %}
 7751   ins_pipe(pipe_class_default);
 7752 %}
 7753 
 7754 // CompareAndExchange
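      // CompareAndExchange returns the previous memory value in $res (the assembler's boolean
      // result register is noreg here), in contrast to CompareAndSwap above, which returns a
      // true/false result in $res and discards the old value into R0.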
 7755 
 7756 instruct compareAndExchangeB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7757   match(Set res (CompareAndExchangeB mem_ptr (Binary src1 src2)));
 7758   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && VM_Version::has_lqarx());
 7759   effect(TEMP_DEF res, TEMP cr0);
 7760   format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
 7761   ins_encode %{
 7762     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7763     __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7764                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7765                 noreg, nullptr, true);
 7766   %}
 7767   ins_pipe(pipe_class_default);
 7768 %}
 7769 
 7770 instruct compareAndExchangeB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, flagsRegCR0 cr0) %{
 7771   match(Set res (CompareAndExchangeB mem_ptr (Binary src1 src2)));
 7772   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && !VM_Version::has_lqarx());
 7773   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
 7774   format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
 7775   ins_encode %{
 7776     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7777     __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
 7778                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7779                 noreg, nullptr, true);
 7780   %}
 7781   ins_pipe(pipe_class_default);
 7782 %}
 7783 
 7784 instruct compareAndExchangeB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7785   match(Set res (CompareAndExchangeB mem_ptr (Binary src1 src2)));
 7786   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && VM_Version::has_lqarx());
 7787   effect(TEMP_DEF res, TEMP cr0);
 7788   format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
 7789   ins_encode %{
 7790     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7791     __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7792                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7793                 noreg, nullptr, true);
 7794     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7795       __ isync();
 7796     } else {
 7797       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7798       __ sync();
 7799     }
 7800   %}
 7801   ins_pipe(pipe_class_default);
 7802 %}
 7803 
 7804 instruct compareAndExchangeB4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, flagsRegCR0 cr0) %{
 7805   match(Set res (CompareAndExchangeB mem_ptr (Binary src1 src2)));
 7806   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && !VM_Version::has_lqarx());
 7807   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
 7808   format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
 7809   ins_encode %{
 7810     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7811     __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
 7812                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7813                 noreg, nullptr, true);
 7814     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7815       __ isync();
 7816     } else {
 7817       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7818       __ sync();
 7819     }
 7820   %}
 7821   ins_pipe(pipe_class_default);
 7822 %}
 7823 
 7824 instruct compareAndExchangeS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7825   match(Set res (CompareAndExchangeS mem_ptr (Binary src1 src2)));
 7826   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && VM_Version::has_lqarx());
 7827   effect(TEMP_DEF res, TEMP cr0);
 7828   format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
 7829   ins_encode %{
 7830     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7831     __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7832                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7833                 noreg, nullptr, true);
 7834   %}
 7835   ins_pipe(pipe_class_default);
 7836 %}
 7837 
 7838 instruct compareAndExchangeS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, flagsRegCR0 cr0) %{
 7839   match(Set res (CompareAndExchangeS mem_ptr (Binary src1 src2)));
 7840   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && !VM_Version::has_lqarx());
 7841   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
 7842   format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
 7843   ins_encode %{
 7844     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7845     __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
 7846                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7847                 noreg, nullptr, true);
 7848   %}
 7849   ins_pipe(pipe_class_default);
 7850 %}
 7851 
 7852 instruct compareAndExchangeS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7853   match(Set res (CompareAndExchangeS mem_ptr (Binary src1 src2)));
 7854   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && VM_Version::has_lqarx());
 7855   effect(TEMP_DEF res, TEMP cr0);
 7856   format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
 7857   ins_encode %{
 7858     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7859     __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
 7860                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7861                 noreg, nullptr, true);
 7862     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7863       __ isync();
 7864     } else {
 7865       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7866       __ sync();
 7867     }
 7868   %}
 7869   ins_pipe(pipe_class_default);
 7870 %}
 7871 
 7872 instruct compareAndExchangeS4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src1, rarg4RegI src2, iRegIdst tmp1, flagsRegCR0 cr0) %{
 7873   match(Set res (CompareAndExchangeS mem_ptr (Binary src1 src2)));
 7874   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && !VM_Version::has_lqarx());
 7875   effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
 7876   format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
 7877   ins_encode %{
 7878     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7879     __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
 7880                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7881                 noreg, nullptr, true);
 7882     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7883       __ isync();
 7884     } else {
 7885       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7886       __ sync();
 7887     }
 7888   %}
 7889   ins_pipe(pipe_class_default);
 7890 %}
 7891 
 7892 instruct compareAndExchangeI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7893   match(Set res (CompareAndExchangeI mem_ptr (Binary src1 src2)));
 7894   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
 7895   effect(TEMP_DEF res, TEMP cr0);
 7896   format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as int" %}
 7897   ins_encode %{
 7898     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7899     __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7900                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7901                 noreg, nullptr, true);
 7902   %}
 7903   ins_pipe(pipe_class_default);
 7904 %}
 7905 
 7906 instruct compareAndExchangeI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
 7907   match(Set res (CompareAndExchangeI mem_ptr (Binary src1 src2)));
 7908   predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
 7909   effect(TEMP_DEF res, TEMP cr0);
 7910   format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as int" %}
 7911   ins_encode %{
 7912     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7913     __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7914                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7915                 noreg, nullptr, true);
 7916     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7917       __ isync();
 7918     } else {
 7919       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7920       __ sync();
 7921     }
 7922   %}
 7923   ins_pipe(pipe_class_default);
 7924 %}
 7925 
 7926 instruct compareAndExchangeN_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
 7927   match(Set res (CompareAndExchangeN mem_ptr (Binary src1 src2)));
 7928   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && n->as_LoadStore()->barrier_data() == 0);
 7929   effect(TEMP_DEF res, TEMP cr0);
 7930   format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as narrow oop" %}
 7931   ins_encode %{
 7932     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7933     __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7934                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7935                 noreg, nullptr, true);
 7936   %}
 7937   ins_pipe(pipe_class_default);
 7938 %}
 7939 
 7940 instruct compareAndExchangeN_acq_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
 7941   match(Set res (CompareAndExchangeN mem_ptr (Binary src1 src2)));
 7942   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
 7943   effect(TEMP_DEF res, TEMP cr0);
 7944   format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as narrow oop" %}
 7945   ins_encode %{
 7946     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7947     __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7948                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7949                 noreg, nullptr, true);
 7950     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7951       __ isync();
 7952     } else {
 7953       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7954       __ sync();
 7955     }
 7956   %}
 7957   ins_pipe(pipe_class_default);
 7958 %}
 7959 
 7960 instruct compareAndExchangeL_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
 7961   match(Set res (CompareAndExchangeL mem_ptr (Binary src1 src2)));
 7962   predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
 7963   effect(TEMP_DEF res, TEMP cr0);
 7964   format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as long" %}
 7965   ins_encode %{
 7966     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7967     __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7968                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7969                 noreg, nullptr, true);
 7970   %}
 7971   ins_pipe(pipe_class_default);
 7972 %}
 7973 
 7974 instruct compareAndExchangeL_acq_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
 7975   match(Set res (CompareAndExchangeL mem_ptr (Binary src1 src2)));
 7976   predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
 7977   effect(TEMP_DEF res, TEMP cr0);
 7978   format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as long" %}
 7979   ins_encode %{
 7980     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 7981     __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 7982                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 7983                 noreg, nullptr, true);
 7984     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 7985       __ isync();
 7986     } else {
 7987       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 7988       __ sync();
 7989     }
 7990   %}
 7991   ins_pipe(pipe_class_default);
 7992 %}
 7993 
 7994 instruct compareAndExchangeP_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
 7995   match(Set res (CompareAndExchangeP mem_ptr (Binary src1 src2)));
 7996   predicate((((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst)
 7997             && n->as_LoadStore()->barrier_data() == 0);
 7998   effect(TEMP_DEF res, TEMP cr0);
 7999   format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
 8000   ins_encode %{
 8001     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 8002     __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 8003                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 8004                 noreg, nullptr, true);
 8005   %}
 8006   ins_pipe(pipe_class_default);
 8007 %}
 8008 
 8009 instruct compareAndExchangeP_acq_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
 8010   match(Set res (CompareAndExchangeP mem_ptr (Binary src1 src2)));
 8011   predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst)
 8012             && n->as_LoadStore()->barrier_data() == 0);
 8013   effect(TEMP_DEF res, TEMP cr0);
 8014   format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
 8015   ins_encode %{
 8016     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
 8017     __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
 8018                 MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
 8019                 noreg, nullptr, true);
 8020     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8021       __ isync();
 8022     } else {
 8023       // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
 8024       __ sync();
 8025     }
 8026   %}
 8027   ins_pipe(pipe_class_default);
 8028 %}
 8029 
 8030 // Special RMW
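      // GetAndAdd/GetAndSet return the previous memory value in $res. Like the strong CAS
      // instructs they are followed by isync or sync, depending on
      // support_IRIW_for_not_multiple_copy_atomic_cpu.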
 8031 
 8032 instruct getAndAddB(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
 8033   match(Set res (GetAndAddB mem_ptr src));
 8034   predicate(VM_Version::has_lqarx());
 8035   effect(TEMP_DEF res, TEMP cr0);
 8036   format %{ "GetAndAddB $res, $mem_ptr, $src" %}
 8037   ins_encode %{
 8038     __ getandaddb($res$$Register, $src$$Register, $mem_ptr$$Register,
 8039                   R0, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
 8040     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8041       __ isync();
 8042     } else {
 8043       __ sync();
 8044     }
 8045   %}
 8046   ins_pipe(pipe_class_default);
 8047 %}
 8048 
 8049 instruct getAndAddB4(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src, iRegIsrc tmp1, iRegIsrc tmp2, flagsRegCR0 cr0) %{
 8050   match(Set res (GetAndAddB mem_ptr src));
 8051   predicate(!VM_Version::has_lqarx());
 8052   effect(TEMP_DEF res, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0);
 8053   format %{ "GetAndAddB $res, $mem_ptr, $src" %}
 8054   ins_encode %{
 8055     __ getandaddb($res$$Register, $src$$Register, $mem_ptr$$Register,
 8056                   R0, $tmp1$$Register, $tmp2$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
 8057     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8058       __ isync();
 8059     } else {
 8060       __ sync();
 8061     }
 8062   %}
 8063   ins_pipe(pipe_class_default);
 8064 %}
 8065 
 8066 instruct getAndAddS(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
 8067   match(Set res (GetAndAddS mem_ptr src));
 8068   predicate(VM_Version::has_lqarx());
 8069   effect(TEMP_DEF res, TEMP cr0);
 8070   format %{ "GetAndAddS $res, $mem_ptr, $src" %}
 8071   ins_encode %{
 8072     __ getandaddh($res$$Register, $src$$Register, $mem_ptr$$Register,
 8073                   R0, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
 8074     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8075       __ isync();
 8076     } else {
 8077       __ sync();
 8078     }
 8079   %}
 8080   ins_pipe(pipe_class_default);
 8081 %}
 8082 
 8083 instruct getAndAddS4(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src, iRegIsrc tmp1, iRegIsrc tmp2, flagsRegCR0 cr0) %{
 8084   match(Set res (GetAndAddS mem_ptr src));
 8085   predicate(!VM_Version::has_lqarx());
 8086   effect(TEMP_DEF res, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0);
 8087   format %{ "GetAndAddS $res, $mem_ptr, $src" %}
 8088   ins_encode %{
 8089     __ getandaddh($res$$Register, $src$$Register, $mem_ptr$$Register,
 8090                   R0, $tmp1$$Register, $tmp2$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
 8091     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8092       __ isync();
 8093     } else {
 8094       __ sync();
 8095     }
 8096   %}
 8097   ins_pipe(pipe_class_default);
 8098 %}
 8099 
 8100 instruct getAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
 8101   match(Set res (GetAndAddI mem_ptr src));
 8102   effect(TEMP_DEF res, TEMP cr0);
 8103   format %{ "GetAndAddI $res, $mem_ptr, $src" %}
 8104   ins_encode %{
 8105     __ getandaddw($res$$Register, $src$$Register, $mem_ptr$$Register,
 8106                   R0, MacroAssembler::cmpxchgx_hint_atomic_update());
 8107     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8108       __ isync();
 8109     } else {
 8110       __ sync();
 8111     }
 8112   %}
 8113   ins_pipe(pipe_class_default);
 8114 %}
 8115 
 8116 instruct getAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr0) %{
 8117   match(Set res (GetAndAddL mem_ptr src));
 8118   effect(TEMP_DEF res, TEMP cr0);
 8119   format %{ "GetAndAddL $res, $mem_ptr, $src" %}
 8120   ins_encode %{
 8121     __ getandaddd($res$$Register, $src$$Register, $mem_ptr$$Register,
 8122                   R0, MacroAssembler::cmpxchgx_hint_atomic_update());
 8123     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8124       __ isync();
 8125     } else {
 8126       __ sync();
 8127     }
 8128   %}
 8129   ins_pipe(pipe_class_default);
 8130 %}
 8131 
 8132 instruct getAndSetB(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
 8133   match(Set res (GetAndSetB mem_ptr src));
 8134   predicate(VM_Version::has_lqarx());
 8135   effect(TEMP_DEF res, TEMP cr0);
 8136   format %{ "GetAndSetB $res, $mem_ptr, $src" %}
 8137   ins_encode %{
 8138     __ getandsetb($res$$Register, $src$$Register, $mem_ptr$$Register,
 8139                   noreg, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
 8140     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8141       __ isync();
 8142     } else {
 8143       __ sync();
 8144     }
 8145   %}
 8146   ins_pipe(pipe_class_default);
 8147 %}
 8148 
 8149 instruct getAndSetB4(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src, iRegIsrc tmp1, iRegIsrc tmp2, flagsRegCR0 cr0) %{
 8150   match(Set res (GetAndSetB mem_ptr src));
 8151   predicate(!VM_Version::has_lqarx());
 8152   effect(TEMP_DEF res, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0);
 8153   format %{ "GetAndSetB $res, $mem_ptr, $src" %}
 8154   ins_encode %{
 8155     __ getandsetb($res$$Register, $src$$Register, $mem_ptr$$Register,
 8156                   R0, $tmp1$$Register, $tmp2$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
 8157     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8158       __ isync();
 8159     } else {
 8160       __ sync();
 8161     }
 8162   %}
 8163   ins_pipe(pipe_class_default);
 8164 %}
 8165 
 8166 instruct getAndSetS(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
 8167   match(Set res (GetAndSetS mem_ptr src));
 8168   predicate(VM_Version::has_lqarx());
 8169   effect(TEMP_DEF res, TEMP cr0);
 8170   format %{ "GetAndSetS $res, $mem_ptr, $src" %}
 8171   ins_encode %{
 8172     __ getandseth($res$$Register, $src$$Register, $mem_ptr$$Register,
 8173                   noreg, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
 8174     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8175       __ isync();
 8176     } else {
 8177       __ sync();
 8178     }
 8179   %}
 8180   ins_pipe(pipe_class_default);
 8181 %}
 8182 
 8183 instruct getAndSetS4(iRegIdst res, rarg3RegP mem_ptr, iRegIsrc src, iRegIsrc tmp1, iRegIsrc tmp2, flagsRegCR0 cr0) %{
 8184   match(Set res (GetAndSetS mem_ptr src));
 8185   predicate(!VM_Version::has_lqarx());
 8186   effect(TEMP_DEF res, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0);
 8187   format %{ "GetAndSetS $res, $mem_ptr, $src" %}
 8188   ins_encode %{
 8189     __ getandseth($res$$Register, $src$$Register, $mem_ptr$$Register,
 8190                   R0, $tmp1$$Register, $tmp2$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
 8191     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8192       __ isync();
 8193     } else {
 8194       __ sync();
 8195     }
 8196   %}
 8197   ins_pipe(pipe_class_default);
 8198 %}
 8199 
 8200 instruct getAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
 8201   match(Set res (GetAndSetI mem_ptr src));
 8202   effect(TEMP_DEF res, TEMP cr0);
 8203   format %{ "GetAndSetI $res, $mem_ptr, $src" %}
 8204   ins_encode %{
 8205     __ getandsetw($res$$Register, $src$$Register, $mem_ptr$$Register,
 8206                   MacroAssembler::cmpxchgx_hint_atomic_update());
 8207     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8208       __ isync();
 8209     } else {
 8210       __ sync();
 8211     }
 8212   %}
 8213   ins_pipe(pipe_class_default);
 8214 %}
 8215 
 8216 instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr0) %{
 8217   match(Set res (GetAndSetL mem_ptr src));
 8218   effect(TEMP_DEF res, TEMP cr0);
 8219   format %{ "GetAndSetL $res, $mem_ptr, $src" %}
 8220   ins_encode %{
 8221     __ getandsetd($res$$Register, $src$$Register, $mem_ptr$$Register,
 8222                   MacroAssembler::cmpxchgx_hint_atomic_update());
 8223     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8224       __ isync();
 8225     } else {
 8226       __ sync();
 8227     }
 8228   %}
 8229   ins_pipe(pipe_class_default);
 8230 %}
 8231 
 8232 instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src, flagsRegCR0 cr0) %{
 8233   match(Set res (GetAndSetP mem_ptr src));
 8234   predicate(n->as_LoadStore()->barrier_data() == 0);
 8235   effect(TEMP_DEF res, TEMP cr0);
 8236   format %{ "GetAndSetP $res, $mem_ptr, $src" %}
 8237   ins_encode %{
 8238     __ getandsetd($res$$Register, $src$$Register, $mem_ptr$$Register,
 8239                   MacroAssembler::cmpxchgx_hint_atomic_update());
 8240     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8241       __ isync();
 8242     } else {
 8243       __ sync();
 8244     }
 8245   %}
 8246   ins_pipe(pipe_class_default);
 8247 %}
 8248 
 8249 instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src, flagsRegCR0 cr0) %{
 8250   match(Set res (GetAndSetN mem_ptr src));
 8251   predicate(n->as_LoadStore()->barrier_data() == 0);
 8252   effect(TEMP_DEF res, TEMP cr0);
 8253   format %{ "GetAndSetN $res, $mem_ptr, $src" %}
 8254   ins_encode %{
 8255     __ getandsetw($res$$Register, $src$$Register, $mem_ptr$$Register,
 8256                   MacroAssembler::cmpxchgx_hint_atomic_update());
 8257     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 8258       __ isync();
 8259     } else {
 8260       __ sync();
 8261     }
 8262   %}
 8263   ins_pipe(pipe_class_default);
 8264 %}
 8265 
 8266 //----------Arithmetic Instructions--------------------------------------------
 8267 // Addition Instructions
 8268 
 8269 // Register Addition
 8270 instruct addI_reg_reg(iRegIdst dst, iRegIsrc_iRegL2Isrc src1, iRegIsrc_iRegL2Isrc src2) %{
 8271   match(Set dst (AddI src1 src2));
 8272   format %{ "ADD     $dst, $src1, $src2" %}
 8273   size(4);
 8274   ins_encode %{
 8275     __ add($dst$$Register, $src1$$Register, $src2$$Register);
 8276   %}
 8277   ins_pipe(pipe_class_default);
 8278 %}
 8279 
 8280 // Expand does not work with the instruct above (ADLC complains, see below), so provide a copy without a match rule.
 8281 instruct addI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8282   // no match-rule
 8283   effect(DEF dst, USE src1, USE src2);
 8284   format %{ "ADD     $dst, $src1, $src2" %}
 8285   size(4);
 8286   ins_encode %{
 8287     __ add($dst$$Register, $src1$$Register, $src2$$Register);
 8288   %}
 8289   ins_pipe(pipe_class_default);
 8290 %}
 8291 
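      // Re-associate a left-leaning chain of three adds into a balanced tree: src1+src2 and
      // src3+src4 can execute in parallel, shortening the dependency chain from three adds to two.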
 8292 instruct tree_addI_addI_addI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
 8293   match(Set dst (AddI (AddI (AddI src1 src2) src3) src4));
 8294   ins_cost(DEFAULT_COST*3);
 8295 
 8296   expand %{
 8297     // FIXME: we should do this in the ideal world.
 8298     iRegIdst tmp1;
 8299     iRegIdst tmp2;
 8300     addI_reg_reg(tmp1, src1, src2);
 8301     addI_reg_reg_2(tmp2, src3, src4); // Adlc complains about addI_reg_reg.
 8302     addI_reg_reg(dst, tmp1, tmp2);
 8303   %}
 8304 %}
 8305 
 8306 // Immediate Addition
 8307 instruct addI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
 8308   match(Set dst (AddI src1 src2));
 8309   format %{ "ADDI    $dst, $src1, $src2" %}
 8310   size(4);
 8311   ins_encode %{
 8312     __ addi($dst$$Register, $src1$$Register, $src2$$constant);
 8313   %}
 8314   ins_pipe(pipe_class_default);
 8315 %}
 8316 
 8317 // Immediate Addition with 16-bit shifted operand
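      // ADDIS adds the immediate shifted left by 16, so the encoding passes ($src2$$constant) >> 16;
      // e.g. a constant of 0x12340000 is encoded as "addis dst, src, 0x1234".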
 8318 instruct addI_reg_immhi16(iRegIdst dst, iRegIsrc src1, immIhi16 src2) %{
 8319   match(Set dst (AddI src1 src2));
 8320   format %{ "ADDIS   $dst, $src1, $src2" %}
 8321   size(4);
 8322   ins_encode %{
 8323     __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
 8324   %}
 8325   ins_pipe(pipe_class_default);
 8326 %}
 8327 
 8328 // Immediate Addition using prefixed addi
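      // Prefixed instructions (Power ISA 3.1) are 8 bytes long and must not cross a 64-byte
      // boundary. The assert below checks that the prefix word does not sit in the last slot
      // of a 64-byte block ((pc & 0x3c) == 0x3c), and the ins_alignment request gives the
      // code emitter room to guarantee that. The same applies to the long and pointer
      // variants further down.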
 8329 instruct addI_reg_imm32(iRegIdst dst, iRegIsrc src1, immI32 src2) %{
 8330   match(Set dst (AddI src1 src2));
 8331   predicate(PowerArchitecturePPC64 >= 10);
 8332   ins_cost(DEFAULT_COST+1);
 8333   format %{ "PADDI   $dst, $src1, $src2" %}
 8334   size(8);
 8335   ins_encode %{
 8336     assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
 8337     __ paddi($dst$$Register, $src1$$Register, $src2$$constant);
 8338   %}
 8339   ins_pipe(pipe_class_default);
 8340   ins_alignment(2);
 8341 %}
 8342 
 8343 // Long Addition
 8344 instruct addL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8345   match(Set dst (AddL src1 src2));
 8346   format %{ "ADD     $dst, $src1, $src2 \t// long" %}
 8347   size(4);
 8348   ins_encode %{
 8349     __ add($dst$$Register, $src1$$Register, $src2$$Register);
 8350   %}
 8351   ins_pipe(pipe_class_default);
 8352 %}
 8353 
 8354 // Expand does not work with the instruct above (ADLC complains, see below), so provide a copy without a match rule.
 8355 instruct addL_reg_reg_2(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8356   // no match-rule
 8357   effect(DEF dst, USE src1, USE src2);
 8358   format %{ "ADD     $dst, $src1, $src2 \t// long" %}
 8359   size(4);
 8360   ins_encode %{
 8361     __ add($dst$$Register, $src1$$Register, $src2$$Register);
 8362   %}
 8363   ins_pipe(pipe_class_default);
 8364 %}
 8365 
 8366 instruct tree_addL_addL_addL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, iRegLsrc src3, iRegLsrc src4) %{
 8367   match(Set dst (AddL (AddL (AddL src1 src2) src3) src4));
 8368   ins_cost(DEFAULT_COST*3);
 8369 
 8370   expand %{
 8371     // FIXME: we should do this in the ideal world.
 8372     iRegLdst tmp1;
 8373     iRegLdst tmp2;
 8374     addL_reg_reg(tmp1, src1, src2);
 8375     addL_reg_reg_2(tmp2, src3, src4); // Adlc complains about addL_reg_reg.
 8376     addL_reg_reg(dst, tmp1, tmp2);
 8377   %}
 8378 %}
 8379 
 8380 // AddL + ConvL2I.
 8381 instruct addI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8382   match(Set dst (ConvL2I (AddL src1 src2)));
 8383 
 8384   format %{ "ADD     $dst, $src1, $src2 \t// long + l2i" %}
 8385   size(4);
 8386   ins_encode %{
 8387     __ add($dst$$Register, $src1$$Register, $src2$$Register);
 8388   %}
 8389   ins_pipe(pipe_class_default);
 8390 %}
 8391 
 8392 // No constant pool entries required.
 8393 instruct addL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
 8394   match(Set dst (AddL src1 src2));
 8395 
 8396   format %{ "ADDI    $dst, $src1, $src2" %}
 8397   size(4);
 8398   ins_encode %{
 8399     __ addi($dst$$Register, $src1$$Register, $src2$$constant);
 8400   %}
 8401   ins_pipe(pipe_class_default);
 8402 %}
 8403 
 8404 // Long Immediate Addition with 16-bit shifted operand.
 8405 // No constant pool entries required.
 8406 instruct addL_reg_immhi16(iRegLdst dst, iRegLsrc src1, immL32hi16 src2) %{
 8407   match(Set dst (AddL src1 src2));
 8408 
 8409   format %{ "ADDIS   $dst, $src1, $src2" %}
 8410   size(4);
 8411   ins_encode %{
 8412     __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
 8413   %}
 8414   ins_pipe(pipe_class_default);
 8415 %}
 8416 
 8417 // Long Immediate Addition using prefixed addi
 8418 // No constant pool entries required.
 8419 instruct addL_reg_imm34(iRegLdst dst, iRegLsrc src1, immL34 src2) %{
 8420   match(Set dst (AddL src1 src2));
 8421   predicate(PowerArchitecturePPC64 >= 10);
 8422   ins_cost(DEFAULT_COST+1);
 8423 
 8424   format %{ "PADDI   $dst, $src1, $src2" %}
 8425   size(8);
 8426   ins_encode %{
 8427     assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
 8428     __ paddi($dst$$Register, $src1$$Register, $src2$$constant);
 8429   %}
 8430   ins_pipe(pipe_class_default);
 8431   ins_alignment(2);
 8432 %}
 8433 
 8434 // Pointer Register Addition
 8435 instruct addP_reg_reg(iRegPdst dst, iRegP_N2P src1, iRegLsrc src2) %{
 8436   match(Set dst (AddP src1 src2));
 8437   format %{ "ADD     $dst, $src1, $src2" %}
 8438   size(4);
 8439   ins_encode %{
 8440     __ add($dst$$Register, $src1$$Register, $src2$$Register);
 8441   %}
 8442   ins_pipe(pipe_class_default);
 8443 %}
 8444 
 8445 // Pointer Immediate Addition
 8446 // No constant pool entries required.
 8447 instruct addP_reg_imm16(iRegPdst dst, iRegP_N2P src1, immL16 src2) %{
 8448   match(Set dst (AddP src1 src2));
 8449 
 8450   format %{ "ADDI    $dst, $src1, $src2" %}
 8451   size(4);
 8452   ins_encode %{
 8453     __ addi($dst$$Register, $src1$$Register, $src2$$constant);
 8454   %}
 8455   ins_pipe(pipe_class_default);
 8456 %}
 8457 
 8458 // Pointer Immediate Addition with 16-bit shifted operand.
 8459 // No constant pool entries required.
 8460 instruct addP_reg_immhi16(iRegPdst dst, iRegP_N2P src1, immL32hi16 src2) %{
 8461   match(Set dst (AddP src1 src2));
 8462 
 8463   format %{ "ADDIS   $dst, $src1, $src2" %}
 8464   size(4);
 8465   ins_encode %{
 8466     __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
 8467   %}
 8468   ins_pipe(pipe_class_default);
 8469 %}
 8470 
 8471 // Pointer Immediate Addition using prefixed addi
 8472 // No constant pool entries required.
 8473 instruct addP_reg_imm34(iRegPdst dst, iRegP_N2P src1, immL34 src2) %{
 8474   match(Set dst (AddP src1 src2));
 8475   predicate(PowerArchitecturePPC64 >= 10);
 8476   ins_cost(DEFAULT_COST+1);
 8477 
 8478   format %{ "PADDI   $dst, $src1, $src2" %}
 8479   size(8);
 8480   ins_encode %{
 8481     assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
 8482     __ paddi($dst$$Register, $src1$$Register, $src2$$constant);
 8483   %}
 8484   ins_pipe(pipe_class_default);
 8485   ins_alignment(2);
 8486 %}
 8487 
 8488 //---------------------
 8489 // Subtraction Instructions
 8490 
 8491 // Register Subtraction
 8492 instruct subI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8493   match(Set dst (SubI src1 src2));
 8494   format %{ "SUBF    $dst, $src2, $src1" %}
 8495   size(4);
 8496   ins_encode %{
 8497     __ subf($dst$$Register, $src2$$Register, $src1$$Register);
 8498   %}
 8499   ins_pipe(pipe_class_default);
 8500 %}
 8501 
 8502 // Immediate Subtraction
 8503 // The compiler converts "x - c0" into "x + (-c0)" (see SubLNode::Ideal).
 8504 // Don't try to use addi with -$src2$$constant since it can overflow when $src2$$constant == minI16.
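      // Example: for $src2$$constant == -32768 (minI16), -$src2$$constant == 32768, which does
      // not fit into the signed 16-bit immediate range [-32768, 32767] of addi.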
 8505 
 8506 // SubI from constant (using subfic).
 8507 instruct subI_imm16_reg(iRegIdst dst, immI16 src1, iRegIsrc src2) %{
 8508   match(Set dst (SubI src1 src2));
 8509   format %{ "SUBI    $dst, $src1, $src2" %}
 8510 
 8511   size(4);
 8512   ins_encode %{
 8513     __ subfic($dst$$Register, $src2$$Register, $src1$$constant);
 8514   %}
 8515   ins_pipe(pipe_class_default);
 8516 %}
 8517 
 8518 // Turn the sign-bit of an integer into a 32-bit mask, 0x0...0 for
 8519 // positive integers and 0xF...F for negative ones.
 8520 instruct signmask32I_regI(iRegIdst dst, iRegIsrc src) %{
 8521   // no match-rule, false predicate
 8522   effect(DEF dst, USE src);
 8523   predicate(false);
 8524 
 8525   format %{ "SRAWI   $dst, $src, #31" %}
 8526   size(4);
 8527   ins_encode %{
 8528     __ srawi($dst$$Register, $src$$Register, 0x1f);
 8529   %}
 8530   ins_pipe(pipe_class_default);
 8531 %}
 8532 
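      // AbsI via the sign mask above: with mask = src >> 31 (arithmetic), abs(src) = (src ^ mask) - mask.
      // E.g. src = -5: mask = 0xFFFFFFFF, src ^ mask = 4, and 4 - (-1) = 5.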
 8533 instruct absI_reg_Ex(iRegIdst dst, iRegIsrc src) %{
 8534   match(Set dst (AbsI src));
 8535   ins_cost(DEFAULT_COST*3);
 8536 
 8537   expand %{
 8538     iRegIdst tmp1;
 8539     iRegIdst tmp2;
 8540     signmask32I_regI(tmp1, src);
 8541     xorI_reg_reg(tmp2, tmp1, src);
 8542     subI_reg_reg(dst, tmp2, tmp1);
 8543   %}
 8544 %}
 8545 
 8546 instruct negI_regI(iRegIdst dst, immI_0 zero, iRegIsrc src2) %{
 8547   match(Set dst (SubI zero src2));
 8548   format %{ "NEG     $dst, $src2" %}
 8549   size(4);
 8550   ins_encode %{
 8551     __ neg($dst$$Register, $src2$$Register);
 8552   %}
 8553   ins_pipe(pipe_class_default);
 8554 %}
 8555 
 8556 // Long subtraction
 8557 instruct subL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8558   match(Set dst (SubL src1 src2));
 8559   format %{ "SUBF    $dst, $src2, $src1 \t// long" %}
 8560   size(4);
 8561   ins_encode %{
 8562     __ subf($dst$$Register, $src2$$Register, $src1$$Register);
 8563   %}
 8564   ins_pipe(pipe_class_default);
 8565 %}
 8566 
 8567 // SubL + convL2I.
 8568 instruct subI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8569   match(Set dst (ConvL2I (SubL src1 src2)));
 8570 
 8571   format %{ "SUBF    $dst, $src2, $src1 \t// long + l2i" %}
 8572   size(4);
 8573   ins_encode %{
 8574     __ subf($dst$$Register, $src2$$Register, $src1$$Register);
 8575   %}
 8576   ins_pipe(pipe_class_default);
 8577 %}
 8578 
 8579 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
 8580 // positive longs and 0xF...F for negative ones.
 8581 instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
 8582   // no match-rule, false predicate
 8583   effect(DEF dst, USE src);
 8584   predicate(false);
 8585 
 8586   format %{ "SRADI   $dst, $src, #63" %}
 8587   size(4);
 8588   ins_encode %{
 8589     __ sradi($dst$$Register, $src$$Register, 0x3f);
 8590   %}
 8591   ins_pipe(pipe_class_default);
 8592 %}
 8593 
 8594 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
 8595 // positive longs and 0xF...F for negative ones.
 8596 instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
 8597   // no match-rule, false predicate
 8598   effect(DEF dst, USE src);
 8599   predicate(false);
 8600 
 8601   format %{ "SRADI   $dst, $src, #63" %}
 8602   size(4);
 8603   ins_encode %{
 8604     __ sradi($dst$$Register, $src$$Register, 0x3f);
 8605   %}
 8606   ins_pipe(pipe_class_default);
 8607 %}
 8608 
 8609 instruct absL_reg_Ex(iRegLdst dst, iRegLsrc src) %{
 8610   match(Set dst (AbsL src));
 8611   ins_cost(DEFAULT_COST*3);
 8612 
 8613   expand %{
 8614     iRegLdst tmp1;
 8615     iRegLdst tmp2;
 8616     signmask64L_regL(tmp1, src);
 8617     xorL_reg_reg(tmp2, tmp1, src);
 8618     subL_reg_reg(dst, tmp2, tmp1);
 8619   %}
 8620 %}
 8621 
 8622 // Long negation
 8623 instruct negL_reg_reg(iRegLdst dst, immL_0 zero, iRegLsrc src2) %{
 8624   match(Set dst (SubL zero src2));
 8625   format %{ "NEG     $dst, $src2 \t// long" %}
 8626   size(4);
 8627   ins_encode %{
 8628     __ neg($dst$$Register, $src2$$Register);
 8629   %}
 8630   ins_pipe(pipe_class_default);
 8631 %}
 8632 
 8633 // NegL + ConvL2I.
 8634 instruct negI_con0_regL(iRegIdst dst, immL_0 zero, iRegLsrc src2) %{
 8635   match(Set dst (ConvL2I (SubL zero src2)));
 8636 
 8637   format %{ "NEG     $dst, $src2 \t// long + l2i" %}
 8638   size(4);
 8639   ins_encode %{
 8640     __ neg($dst$$Register, $src2$$Register);
 8641   %}
 8642   ins_pipe(pipe_class_default);
 8643 %}
 8644 
 8645 // Multiplication Instructions
 8646 // Integer Multiplication
 8647 
 8648 // Register Multiplication
 8649 instruct mulI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8650   match(Set dst (MulI src1 src2));
 8651   ins_cost(DEFAULT_COST);
 8652 
 8653   format %{ "MULLW   $dst, $src1, $src2" %}
 8654   size(4);
 8655   ins_encode %{
 8656     __ mullw($dst$$Register, $src1$$Register, $src2$$Register);
 8657   %}
 8658   ins_pipe(pipe_class_default);
 8659 %}
 8660 
 8661 // Immediate Multiplication
 8662 instruct mulI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
 8663   match(Set dst (MulI src1 src2));
 8664   ins_cost(DEFAULT_COST);
 8665 
 8666   format %{ "MULLI   $dst, $src1, $src2" %}
 8667   size(4);
 8668   ins_encode %{
 8669     __ mulli($dst$$Register, $src1$$Register, $src2$$constant);
 8670   %}
 8671   ins_pipe(pipe_class_default);
 8672 %}
 8673 
 8674 instruct mulL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8675   match(Set dst (MulL src1 src2));
 8676   ins_cost(DEFAULT_COST);
 8677 
 8678   format %{ "MULLD   $dst, $src1, $src2 \t// long" %}
 8679   size(4);
 8680   ins_encode %{
 8681     __ mulld($dst$$Register, $src1$$Register, $src2$$Register);
 8682   %}
 8683   ins_pipe(pipe_class_default);
 8684 %}
 8685 
 8686 // Multiply high for optimized long division by constant.
 8687 instruct mulHighL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8688   match(Set dst (MulHiL src1 src2));
 8689   ins_cost(DEFAULT_COST);
 8690 
 8691   format %{ "MULHD   $dst, $src1, $src2 \t// long" %}
 8692   size(4);
 8693   ins_encode %{
 8694     __ mulhd($dst$$Register, $src1$$Register, $src2$$Register);
 8695   %}
 8696   ins_pipe(pipe_class_default);
 8697 %}
 8698 
 8699 // Immediate Multiplication
 8700 instruct mulL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
 8701   match(Set dst (MulL src1 src2));
 8702   ins_cost(DEFAULT_COST);
 8703 
 8704   format %{ "MULLI   $dst, $src1, $src2" %}
 8705   size(4);
 8706   ins_encode %{
 8707     __ mulli($dst$$Register, $src1$$Register, $src2$$constant);
 8708   %}
 8709   ins_pipe(pipe_class_default);
 8710 %}
 8711 
 8712 // Integer Division with Immediate -1: Negate.
 8713 instruct divI_reg_immIvalueMinus1(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
 8714   match(Set dst (DivI src1 src2));
 8715   ins_cost(DEFAULT_COST);
 8716 
 8717   format %{ "NEG     $dst, $src1 \t// /-1" %}
 8718   size(4);
 8719   ins_encode %{
 8720     __ neg($dst$$Register, $src1$$Register);
 8721   %}
 8722   ins_pipe(pipe_class_default);
 8723 %}
 8724 
 8725 // Integer Division with constant, but not -1.
 8726 // We should be able to improve this by checking the type of src2.
 8727 // It might well be that src2 is known to be positive.
 8728 instruct divI_reg_regnotMinus1(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8729   match(Set dst (DivI src1 src2));
 8730   predicate(n->in(2)->find_int_con(-1) != -1); // src2 is a constant, but not -1
 8731   ins_cost(2*DEFAULT_COST);
 8732 
 8733   format %{ "DIVW    $dst, $src1, $src2 \t// /not-1" %}
 8734   size(4);
 8735   ins_encode %{
 8736     __ divw($dst$$Register, $src1$$Register, $src2$$Register);
 8737   %}
 8738   ins_pipe(pipe_class_default);
 8739 %}
 8740 
 8741 instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
 8742   effect(USE_DEF dst, USE src1, USE crx);
 8743   predicate(false);
 8744 
 8745   ins_variable_size_depending_on_alignment(true);
 8746 
 8747   format %{ "CMOVE   $dst, neg($src1), $crx" %}
 8748   // Worst case is branch + move + stop, no stop without scheduler.
 8749   size(8);
 8750   ins_encode %{
 8751     Label done;
 8752     __ bne($crx$$CondRegister, done);
 8753     __ neg($dst$$Register, $src1$$Register);
 8754     __ bind(done);
 8755   %}
 8756   ins_pipe(pipe_class_default);
 8757 %}
 8758 
 8759 // Integer Division with Registers not containing constants.
 8760 instruct divI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8761   match(Set dst (DivI src1 src2));
 8762   ins_cost(10*DEFAULT_COST);
 8763 
 8764   expand %{
 8765     immI16 imm %{ (int)-1 %}
 8766     flagsReg tmp1;
 8767     cmpI_reg_imm16(tmp1, src2, imm);          // check src2 == -1
 8768     divI_reg_regnotMinus1(dst, src1, src2);   // dst = src1 / src2
 8769     cmovI_bne_negI_reg(dst, tmp1, src1);      // cmove dst = neg(src1) if src2 == -1
 8770   %}
 8771 %}
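
      // The conditional move handles the one case DIVW cannot: Java requires
      // Integer.MIN_VALUE / -1 == Integer.MIN_VALUE (the negation wraps), while
      // divw's result is undefined for that overflow case. The expand therefore
      // compares src2 with -1 and, if equal, replaces the quotient with neg(src1).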
 8772 
 8773 // Long Division with Immediate -1: Negate.
 8774 instruct divL_reg_immLvalueMinus1(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
 8775   match(Set dst (DivL src1 src2));
 8776   ins_cost(DEFAULT_COST);
 8777 
 8778   format %{ "NEG     $dst, $src1 \t// /-1, long" %}
 8779   size(4);
 8780   ins_encode %{
 8781     __ neg($dst$$Register, $src1$$Register);
 8782   %}
 8783   ins_pipe(pipe_class_default);
 8784 %}
 8785 
 8786 // Long Division with constant, but not -1.
 8787 instruct divL_reg_regnotMinus1(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8788   match(Set dst (DivL src1 src2));
 8789   predicate(n->in(2)->find_long_con(-1L) != -1L); // Src2 is a constant, but not -1.
 8790   ins_cost(2*DEFAULT_COST);
 8791 
 8792   format %{ "DIVD    $dst, $src1, $src2 \t// /not-1, long" %}
 8793   size(4);
 8794   ins_encode %{
 8795     __ divd($dst$$Register, $src1$$Register, $src2$$Register);
 8796   %}
 8797   ins_pipe(pipe_class_default);
 8798 %}
 8799 
 8800 instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
 8801   effect(USE_DEF dst, USE src1, USE crx);
 8802   predicate(false);
 8803 
 8804   ins_variable_size_depending_on_alignment(true);
 8805 
 8806   format %{ "CMOVE   $dst, neg($src1), $crx" %}
 8807   // Worst case is branch + move + stop, no stop without scheduler.
 8808   size(8);
 8809   ins_encode %{
 8810     Label done;
 8811     __ bne($crx$$CondRegister, done);
 8812     __ neg($dst$$Register, $src1$$Register);
 8813     __ bind(done);
 8814   %}
 8815   ins_pipe(pipe_class_default);
 8816 %}
 8817 
 8818 // Long Division with Registers not containing constants.
 8819 instruct divL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8820   match(Set dst (DivL src1 src2));
 8821   ins_cost(10*DEFAULT_COST);
 8822 
 8823   expand %{
 8824     immL16 imm %{ (int)-1 %}
 8825     flagsReg tmp1;
 8826     cmpL_reg_imm16(tmp1, src2, imm);          // check src2 == -1
 8827     divL_reg_regnotMinus1(dst, src1, src2);   // dst = src1 / src2
 8828     cmovL_bne_negL_reg(dst, tmp1, src1);      // cmove dst = neg(src1) if src2 == -1
 8829   %}
 8830 %}
 8831 
 8832 // Integer Remainder with registers.
 8833 instruct modI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8834   match(Set dst (ModI src1 src2));
 8835   ins_cost(10*DEFAULT_COST);
 8836 
 8837   expand %{
 8838     immI16 imm %{ (int)-1 %}
 8839     flagsReg tmp1;
 8840     iRegIdst tmp2;
 8841     iRegIdst tmp3;
 8842     cmpI_reg_imm16(tmp1, src2, imm);           // check src2 == -1
 8843     divI_reg_regnotMinus1(tmp2, src1, src2);   // tmp2 = src1 / src2
 8844     cmovI_bne_negI_reg(tmp2, tmp1, src1);      // cmove tmp2 = neg(src1) if src2 == -1
 8845     mulI_reg_reg(tmp3, src2, tmp2);            // tmp3 = src2 * tmp2
 8846     subI_reg_reg(dst, src1, tmp3);             // dst = src1 - tmp3
 8847   %}
 8848 %}
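
      // The remainder is derived from the quotient: dst = src1 - (src1 / src2) * src2.
      // This matches Java semantics, where the result takes the sign of the dividend,
      // e.g. -7 % 3: quotient -2, so -7 - (-2 * 3) = -1.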
 8849 
 8850 // Long Remainder with registers
 8851 instruct modL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8852   match(Set dst (ModL src1 src2));
 8853   ins_cost(10*DEFAULT_COST);
 8854 
 8855   expand %{
 8856     immL16 imm %{ (int)-1 %}
 8857     flagsReg tmp1;
 8858     iRegLdst tmp2;
 8859     iRegLdst tmp3;
 8860     cmpL_reg_imm16(tmp1, src2, imm);             // check src2 == -1
 8861     divL_reg_regnotMinus1(tmp2, src1, src2);     // tmp2 = src1 / src2
 8862     cmovL_bne_negL_reg(tmp2, tmp1, src1);        // cmove tmp2 = neg(src1) if src2 == -1
 8863     mulL_reg_reg(tmp3, src2, tmp2);              // tmp3 = src2 * tmp2
 8864     subL_reg_reg(dst, src1, tmp3);               // dst = src1 - tmp3
 8865   %}
 8866 %}
 8867 
 8868 instruct udivI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8869   match(Set dst (UDivI src1 src2));
 8870   format %{ "DIVWU   $dst, $src1, $src2" %}
 8871   size(4);
 8872   ins_encode %{
 8873     __ divwu($dst$$Register, $src1$$Register, $src2$$Register);
 8874   %}
 8875   ins_pipe(pipe_class_default);
 8876 %}
 8877 
 8878 instruct umodI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8879   match(Set dst (UModI src1 src2));
 8880   expand %{
 8881     iRegIdst tmp1;
 8882     iRegIdst tmp2;
 8883     udivI_reg_reg(tmp1, src1, src2);
 8884     // Compute the lower 32-bit result using signed instructions, as suggested by the ISA.
 8885     // The upper 32 bits will contain garbage.
 8886     mulI_reg_reg(tmp2, src2, tmp1);
 8887     subI_reg_reg(dst, src1, tmp2);
 8888   %}
 8889 %}
 8890 
 8891 instruct udivL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8892   match(Set dst (UDivL src1 src2));
 8893   format %{ "DIVDU   $dst, $src1, $src2" %}
 8894   size(4);
 8895   ins_encode %{
 8896     __ divdu($dst$$Register, $src1$$Register, $src2$$Register);
 8897   %}
 8898   ins_pipe(pipe_class_default);
 8899 %}
 8900 
 8901 instruct umodL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 8902   match(Set dst (UModL src1 src2));
 8903   expand %{
 8904     iRegLdst tmp1;
 8905     iRegLdst tmp2;
 8906     udivL_reg_reg(tmp1, src1, src2);
 8907     mulL_reg_reg(tmp2, src2, tmp1);
 8908     subL_reg_reg(dst, src1, tmp2);
 8909   %}
 8910 %}
 8911 
 8912 // Integer Shift Instructions
 8913 
 8914 // Register Shift Left
 8915 
 8916 // Clear the upper #mask bits of a register (keep only the lowest 64-#mask bits).
 8917 // Used to normalize shift amounts in registers.
 8918 instruct maskI_reg_imm(iRegIdst dst, iRegIsrc src, uimmI6 mask) %{
 8919   // no match-rule, false predicate
 8920   effect(DEF dst, USE src, USE mask);
 8921   predicate(false);
 8922 
 8923   format %{ "MASK    $dst, $src, $mask \t// clear $mask upper bits" %}
 8924   size(4);
 8925   ins_encode %{
 8926     __ clrldi($dst$$Register, $src$$Register, $mask$$constant);
 8927   %}
 8928   ins_pipe(pipe_class_default);
 8929 %}
 8930 
 8931 instruct lShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8932   // no match-rule, false predicate
 8933   effect(DEF dst, USE src1, USE src2);
 8934   predicate(false);
 8935 
 8936   format %{ "SLW     $dst, $src1, $src2" %}
 8937   size(4);
 8938   ins_encode %{
 8939     __ slw($dst$$Register, $src1$$Register, $src2$$Register);
 8940   %}
 8941   ins_pipe(pipe_class_default);
 8942 %}
 8943 
 8944 instruct lShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 8945   match(Set dst (LShiftI src1 src2));
 8946   ins_cost(DEFAULT_COST*2);
 8947   expand %{
 8948     uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
 8949     iRegIdst tmpI;
 8950     maskI_reg_imm(tmpI, src2, mask);
 8951     lShiftI_reg_reg(dst, src1, tmpI);
 8952   %}
 8953 %}
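
      // The mask step is needed because Java uses only the low 5 bits of an int
      // shift count (x << 37 == x << 5), whereas SLW yields 0 for shift amounts
      // in the range 32..63. Masking the count first therefore gives the Java
      // result, e.g. 37 & 0x1f = 5.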
 8954 
 8955 // Register Shift Left Immediate
 8956 instruct lShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
 8957   match(Set dst (LShiftI src1 src2));
 8958 
 8959   format %{ "SLWI    $dst, $src1, ($src2 & 0x1f)" %}
 8960   size(4);
 8961   ins_encode %{
 8962     __ slwi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
 8963   %}
 8964   ins_pipe(pipe_class_default);
 8965 %}
 8966 
 8967 // AndI with negpow2-constant + LShiftI
 8968 instruct lShiftI_andI_immInegpow2_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
 8969   match(Set dst (LShiftI (AndI src1 src2) src3));
 8970   predicate(UseRotateAndMaskInstructionsPPC64);
 8971 
 8972   format %{ "RLWINM  $dst, lShiftI(AndI($src1, $src2), $src3)" %}
 8973   size(4);
 8974   ins_encode %{
 8975     long src3      = $src3$$constant;
 8976     long maskbits  = src3 + log2i_exact(-(juint)$src2$$constant);
 8977     if (maskbits >= 32) {
 8978       __ li($dst$$Register, 0); // addi
 8979     } else {
 8980       __ rlwinm($dst$$Register, $src1$$Register, src3 & 0x1f, 0, (31-maskbits) & 0x1f);
 8981     }
 8982   %}
 8983   ins_pipe(pipe_class_default);
 8984 %}
 8985 
 8986 // RShiftI + AndI with negpow2-constant + LShiftI
 8987 instruct lShiftI_andI_immInegpow2_rShiftI_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
 8988   match(Set dst (LShiftI (AndI (RShiftI src1 src3) src2) src3));
 8989   predicate(UseRotateAndMaskInstructionsPPC64);
 8990 
 8991   format %{ "RLWINM  $dst, lShiftI(AndI(RShiftI($src1, $src3), $src2), $src3)" %}
 8992   size(4);
 8993   ins_encode %{
 8994     long src3      = $src3$$constant;
 8995     long maskbits  = src3 + log2i_exact(-(juint)$src2$$constant);
 8996     if (maskbits >= 32) {
 8997       __ li($dst$$Register, 0); // addi
 8998     } else {
 8999       __ rlwinm($dst$$Register, $src1$$Register, 0, 0, (31-maskbits) & 0x1f);
 9000     }
 9001   %}
 9002   ins_pipe(pipe_class_default);
 9003 %}
 9004 
 9005 instruct lShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
 9006   // no match-rule, false predicate
 9007   effect(DEF dst, USE src1, USE src2);
 9008   predicate(false);
 9009 
 9010   format %{ "SLD     $dst, $src1, $src2" %}
 9011   size(4);
 9012   ins_encode %{
 9013     __ sld($dst$$Register, $src1$$Register, $src2$$Register);
 9014   %}
 9015   ins_pipe(pipe_class_default);
 9016 %}
 9017 
 9018 // Register Shift Left
 9019 instruct lShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
 9020   match(Set dst (LShiftL src1 src2));
 9021   ins_cost(DEFAULT_COST*2);
 9022   expand %{
 9023     uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
 9024     iRegIdst tmpI;
 9025     maskI_reg_imm(tmpI, src2, mask);
 9026     lShiftL_regL_regI(dst, src1, tmpI);
 9027   %}
 9028 %}
 9029 
 9030 // Register Shift Left Immediate
 9031 instruct lshiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
 9032   match(Set dst (LShiftL src1 src2));
 9033   format %{ "SLDI    $dst, $src1, ($src2 & 0x3f)" %}
 9034   size(4);
 9035   ins_encode %{
 9036     __ sldi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9037   %}
 9038   ins_pipe(pipe_class_default);
 9039 %}
 9040 
 9041 // A shift by 32 or more bits makes the ConvI2L unnecessary: its extension bits are shifted out anyway.
 9042 instruct lShiftL_regI_immGE32(iRegLdst dst, iRegIsrc src1, uimmI6_ge32 src2) %{
 9043   match(Set dst (LShiftL (ConvI2L src1) src2));
 9044   ins_cost(DEFAULT_COST);
 9045 
 9046   size(4);
 9047   format %{ "SLDI    $dst, i2l($src1), $src2" %}
 9048   ins_encode %{
 9049     __ sldi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9050   %}
 9051   ins_pipe(pipe_class_default);
 9052 %}
 9053 
 9054 // Shift a positive int to the left.
 9055 // Clrlsldi clears the upper 32 bits and shifts.
 9056 instruct scaledPositiveI2L_lShiftL_convI2L_reg_imm6(iRegLdst dst, iRegIsrc src1, uimmI6 src2) %{
 9057   match(Set dst (LShiftL (ConvI2L src1) src2));
 9058   predicate(((ConvI2LNode*)(_kids[0]->_leaf))->type()->is_long()->is_positive_int());
 9059 
 9060   format %{ "SLDI    $dst, i2l(positive_int($src1)), $src2" %}
 9061   size(4);
 9062   ins_encode %{
 9063     __ clrlsldi($dst$$Register, $src1$$Register, 0x20, $src2$$constant);
 9064   %}
 9065   ins_pipe(pipe_class_default);
 9066 %}
 9067 
 9068 instruct arShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9069   // no match-rule, false predicate
 9070   effect(DEF dst, USE src1, USE src2);
 9071   predicate(false);
 9072 
 9073   format %{ "SRAW    $dst, $src1, $src2" %}
 9074   size(4);
 9075   ins_encode %{
 9076     __ sraw($dst$$Register, $src1$$Register, $src2$$Register);
 9077   %}
 9078   ins_pipe(pipe_class_default);
 9079 %}
 9080 
 9081 // Register Arithmetic Shift Right
 9082 instruct arShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9083   match(Set dst (RShiftI src1 src2));
 9084   ins_cost(DEFAULT_COST*2);
 9085   expand %{
 9086     uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
 9087     iRegIdst tmpI;
 9088     maskI_reg_imm(tmpI, src2, mask);
 9089     arShiftI_reg_reg(dst, src1, tmpI);
 9090   %}
 9091 %}
 9092 
 9093 // Register Arithmetic Shift Right Immediate
 9094 instruct arShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
 9095   match(Set dst (RShiftI src1 src2));
 9096 
 9097   format %{ "SRAWI   $dst, $src1, ($src2 & 0x1f)" %}
 9098   size(4);
 9099   ins_encode %{
 9100     __ srawi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
 9101   %}
 9102   ins_pipe(pipe_class_default);
 9103 %}
 9104 
 9105 instruct arShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
 9106   // no match-rule, false predicate
 9107   effect(DEF dst, USE src1, USE src2);
 9108   predicate(false);
 9109 
 9110   format %{ "SRAD    $dst, $src1, $src2" %}
 9111   size(4);
 9112   ins_encode %{
 9113     __ srad($dst$$Register, $src1$$Register, $src2$$Register);
 9114   %}
 9115   ins_pipe(pipe_class_default);
 9116 %}
 9117 
 9118 // Register Shift Right Arithmetic Long
 9119 instruct arShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
 9120   match(Set dst (RShiftL src1 src2));
 9121   ins_cost(DEFAULT_COST*2);
 9122 
 9123   expand %{
 9124     uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
 9125     iRegIdst tmpI;
 9126     maskI_reg_imm(tmpI, src2, mask);
 9127     arShiftL_regL_regI(dst, src1, tmpI);
 9128   %}
 9129 %}
 9130 
 9131 // Register Shift Right Immediate
 9132 instruct arShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
 9133   match(Set dst (RShiftL src1 src2));
 9134 
 9135   format %{ "SRADI   $dst, $src1, ($src2 & 0x3f)" %}
 9136   size(4);
 9137   ins_encode %{
 9138     __ sradi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9139   %}
 9140   ins_pipe(pipe_class_default);
 9141 %}
 9142 
 9143 // RShiftL + ConvL2I
 9144 instruct convL2I_arShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
 9145   match(Set dst (ConvL2I (RShiftL src1 src2)));
 9146 
 9147   format %{ "SRADI   $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
 9148   size(4);
 9149   ins_encode %{
 9150     __ sradi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9151   %}
 9152   ins_pipe(pipe_class_default);
 9153 %}
 9154 
 9155 instruct urShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9156   // no match-rule, false predicate
 9157   effect(DEF dst, USE src1, USE src2);
 9158   predicate(false);
 9159 
 9160   format %{ "SRW     $dst, $src1, $src2" %}
 9161   size(4);
 9162   ins_encode %{
 9163     __ srw($dst$$Register, $src1$$Register, $src2$$Register);
 9164   %}
 9165   ins_pipe(pipe_class_default);
 9166 %}
 9167 
 9168 // Register Shift Right
 9169 instruct urShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9170   match(Set dst (URShiftI src1 src2));
 9171   ins_cost(DEFAULT_COST*2);
 9172 
 9173   expand %{
 9174     uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
 9175     iRegIdst tmpI;
 9176     maskI_reg_imm(tmpI, src2, mask);
 9177     urShiftI_reg_reg(dst, src1, tmpI);
 9178   %}
 9179 %}
 9180 
 9181 // Register Shift Right Immediate
 9182 instruct urShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
 9183   match(Set dst (URShiftI src1 src2));
 9184 
 9185   format %{ "SRWI    $dst, $src1, ($src2 & 0x1f)" %}
 9186   size(4);
 9187   ins_encode %{
 9188     __ srwi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
 9189   %}
 9190   ins_pipe(pipe_class_default);
 9191 %}
 9192 
 9193 instruct urShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
 9194   // no match-rule, false predicate
 9195   effect(DEF dst, USE src1, USE src2);
 9196   predicate(false);
 9197 
 9198   format %{ "SRD     $dst, $src1, $src2" %}
 9199   size(4);
 9200   ins_encode %{
 9201     __ srd($dst$$Register, $src1$$Register, $src2$$Register);
 9202   %}
 9203   ins_pipe(pipe_class_default);
 9204 %}
 9205 
 9206 // Register Shift Right
 9207 instruct urShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
 9208   match(Set dst (URShiftL src1 src2));
 9209   ins_cost(DEFAULT_COST*2);
 9210 
 9211   expand %{
 9212     uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
 9213     iRegIdst tmpI;
 9214     maskI_reg_imm(tmpI, src2, mask);
 9215     urShiftL_regL_regI(dst, src1, tmpI);
 9216   %}
 9217 %}
 9218 
 9219 // Register Shift Right Immediate
 9220 instruct urShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
 9221   match(Set dst (URShiftL src1 src2));
 9222 
 9223   format %{ "SRDI    $dst, $src1, ($src2 & 0x3f)" %}
 9224   size(4);
 9225   ins_encode %{
 9226     __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9227   %}
 9228   ins_pipe(pipe_class_default);
 9229 %}
 9230 
 9231 // URShiftL + ConvL2I.
 9232 instruct convL2I_urShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
 9233   match(Set dst (ConvL2I (URShiftL src1 src2)));
 9234 
 9235   format %{ "SRDI    $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
 9236   size(4);
 9237   ins_encode %{
 9238     __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9239   %}
 9240   ins_pipe(pipe_class_default);
 9241 %}
 9242 
 9243 // Register Shift Right Immediate with a CastP2X
 9244 instruct shrP_convP2X_reg_imm6(iRegLdst dst, iRegP_N2P src1, uimmI6 src2) %{
 9245   match(Set dst (URShiftL (CastP2X src1) src2));
 9246 
 9247   format %{ "SRDI    $dst, $src1, $src2 \t// Cast ptr $src1 to long and shift" %}
 9248   size(4);
 9249   ins_encode %{
 9250     __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
 9251   %}
 9252   ins_pipe(pipe_class_default);
 9253 %}
 9254 
 9255 // Bitfield Extract: URShiftI + AndI
 9256 instruct andI_urShiftI_regI_immI_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immI src2, immIpow2minus1 src3) %{
 9257   match(Set dst (AndI (URShiftI src1 src2) src3));
 9258 
 9259   format %{ "EXTRDI  $dst, $src1, shift=$src2, mask=$src3 \t// int bitfield extract" %}
 9260   size(4);
 9261   ins_encode %{
 9262     int rshift = ($src2$$constant) & 0x1f;
 9263     int length = log2i_exact((juint)$src3$$constant + 1u);
 9264     if (rshift + length > 32) {
 9265       // if necessary, adjust mask to omit rotated bits.
 9266       length = 32 - rshift;
 9267     }
 9268     __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
 9269   %}
 9270   ins_pipe(pipe_class_default);
 9271 %}
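
      // For illustration: (x >>> 8) & 0xFF matches this rule with rshift = 8 and
      // length = 8 and becomes a single "extrdi $dst, $src1, 8, 48", extracting
      // the 8 bits that sit 8 bits above the least significant bit.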
 9272 
 9273 // Bitfield Extract: URShiftL + AndL
 9274 instruct andL_urShiftL_regL_immI_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immI src2, immLpow2minus1 src3) %{
 9275   match(Set dst (AndL (URShiftL src1 src2) src3));
 9276 
 9277   format %{ "EXTRDI  $dst, $src1, shift=$src2, mask=$src3 \t// long bitfield extract" %}
 9278   size(4);
 9279   ins_encode %{
 9280     int rshift  = ($src2$$constant) & 0x3f;
 9281     int length = log2i_exact((julong)$src3$$constant + 1ull);
 9282     if (rshift + length > 64) {
 9283       // if necessary, adjust mask to omit rotated bits.
 9284       length = 64 - rshift;
 9285     }
 9286     __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
 9287   %}
 9288   ins_pipe(pipe_class_default);
 9289 %}
 9290 
 9291 instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{
 9292   match(Set dst (ConvL2I (ConvI2L src)));
 9293 
 9294   format %{ "EXTSW   $dst, $src \t// int->int" %}
 9295   size(4);
 9296   ins_encode %{
 9297     __ extsw($dst$$Register, $src$$Register);
 9298   %}
 9299   ins_pipe(pipe_class_default);
 9300 %}
 9301 
 9302 //----------Rotate Instructions------------------------------------------------
 9303 
 9304 // Rotate Left by 8-bit immediate
 9305 instruct rotlI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 lshift, immI8 rshift) %{
 9306   match(Set dst (OrI (LShiftI src lshift) (URShiftI src rshift)));
 9307   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
 9308 
 9309   format %{ "ROTLWI  $dst, $src, $lshift" %}
 9310   size(4);
 9311   ins_encode %{
 9312     __ rotlwi($dst$$Register, $src$$Register, $lshift$$constant);
 9313   %}
 9314   ins_pipe(pipe_class_default);
 9315 %}
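
      // The predicate only accepts shift pairs that sum to a multiple of 32, so the
      // OR of the two shifts really is a rotate, e.g. (x << 24) | (x >>> 8) becomes
      // "ROTLWI $dst, $src, 24".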
 9316 
 9317 // Rotate Right by 8-bit immediate
 9318 instruct rotrI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 rshift, immI8 lshift) %{
 9319   match(Set dst (OrI (URShiftI src rshift) (LShiftI src lshift)));
 9320   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
 9321 
 9322   format %{ "ROTRWI  $dst, $src, $rshift" %}
 9323   size(4);
 9324   ins_encode %{
 9325     __ rotrwi($dst$$Register, $src$$Register, $rshift$$constant);
 9326   %}
 9327   ins_pipe(pipe_class_default);
 9328 %}
 9329 
 9330 //----------Floating Point Arithmetic Instructions-----------------------------
 9331 
 9332 // Add float single precision
 9333 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
 9334   match(Set dst (AddF src1 src2));
 9335 
 9336   format %{ "FADDS   $dst, $src1, $src2" %}
 9337   size(4);
 9338   ins_encode %{
 9339     __ fadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9340   %}
 9341   ins_pipe(pipe_class_default);
 9342 %}
 9343 
 9344 // Add float double precision
 9345 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
 9346   match(Set dst (AddD src1 src2));
 9347 
 9348   format %{ "FADD    $dst, $src1, $src2" %}
 9349   size(4);
 9350   ins_encode %{
 9351     __ fadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9352   %}
 9353   ins_pipe(pipe_class_default);
 9354 %}
 9355 
 9356 // Sub float single precision
 9357 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
 9358   match(Set dst (SubF src1 src2));
 9359 
 9360   format %{ "FSUBS   $dst, $src1, $src2" %}
 9361   size(4);
 9362   ins_encode %{
 9363     __ fsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9364   %}
 9365   ins_pipe(pipe_class_default);
 9366 %}
 9367 
 9368 // Sub float double precision
 9369 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
 9370   match(Set dst (SubD src1 src2));
 9371   format %{ "FSUB    $dst, $src1, $src2" %}
 9372   size(4);
 9373   ins_encode %{
 9374     __ fsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9375   %}
 9376   ins_pipe(pipe_class_default);
 9377 %}
 9378 
 9379 // Mul float single precision
 9380 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
 9381   match(Set dst (MulF src1 src2));
 9382   format %{ "FMULS   $dst, $src1, $src2" %}
 9383   size(4);
 9384   ins_encode %{
 9385     __ fmuls($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9386   %}
 9387   ins_pipe(pipe_class_default);
 9388 %}
 9389 
 9390 // Mul float double precision
 9391 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
 9392   match(Set dst (MulD src1 src2));
 9393   format %{ "FMUL    $dst, $src1, $src2" %}
 9394   size(4);
 9395   ins_encode %{
 9396     __ fmul($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9397   %}
 9398   ins_pipe(pipe_class_default);
 9399 %}
 9400 
 9401 // Div float single precision
 9402 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
 9403   match(Set dst (DivF src1 src2));
 9404   format %{ "FDIVS   $dst, $src1, $src2" %}
 9405   size(4);
 9406   ins_encode %{
 9407     __ fdivs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9408   %}
 9409   ins_pipe(pipe_class_default);
 9410 %}
 9411 
 9412 // Div float double precision
 9413 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
 9414   match(Set dst (DivD src1 src2));
 9415   format %{ "FDIV    $dst, $src1, $src2" %}
 9416   size(4);
 9417   ins_encode %{
 9418     __ fdiv($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
 9419   %}
 9420   ins_pipe(pipe_class_default);
 9421 %}
 9422 
 9423 // Absolute float single precision
 9424 instruct absF_reg(regF dst, regF src) %{
 9425   match(Set dst (AbsF src));
 9426   format %{ "FABS    $dst, $src \t// float" %}
 9427   size(4);
 9428   ins_encode %{
 9429     __ fabs($dst$$FloatRegister, $src$$FloatRegister);
 9430   %}
 9431   ins_pipe(pipe_class_default);
 9432 %}
 9433 
 9434 // Absolute float double precision
 9435 instruct absD_reg(regD dst, regD src) %{
 9436   match(Set dst (AbsD src));
 9437   format %{ "FABS    $dst, $src \t// double" %}
 9438   size(4);
 9439   ins_encode %{
 9440     __ fabs($dst$$FloatRegister, $src$$FloatRegister);
 9441   %}
 9442   ins_pipe(pipe_class_default);
 9443 %}
 9444 
 9445 instruct negF_reg(regF dst, regF src) %{
 9446   match(Set dst (NegF src));
 9447   format %{ "FNEG    $dst, $src \t// float" %}
 9448   size(4);
 9449   ins_encode %{
 9450     __ fneg($dst$$FloatRegister, $src$$FloatRegister);
 9451   %}
 9452   ins_pipe(pipe_class_default);
 9453 %}
 9454 
 9455 instruct negD_reg(regD dst, regD src) %{
 9456   match(Set dst (NegD src));
 9457   format %{ "FNEG    $dst, $src \t// double" %}
 9458   size(4);
 9459   ins_encode %{
 9460     __ fneg($dst$$FloatRegister, $src$$FloatRegister);
 9461   %}
 9462   ins_pipe(pipe_class_default);
 9463 %}
 9464 
 9465 // AbsF + NegF.
 9466 instruct negF_absF_reg(regF dst, regF src) %{
 9467   match(Set dst (NegF (AbsF src)));
 9468   format %{ "FNABS   $dst, $src \t// float" %}
 9469   size(4);
 9470   ins_encode %{
 9471     __ fnabs($dst$$FloatRegister, $src$$FloatRegister);
 9472   %}
 9473   ins_pipe(pipe_class_default);
 9474 %}
 9475 
 9476 // AbsD + NegD.
 9477 instruct negD_absD_reg(regD dst, regD src) %{
 9478   match(Set dst (NegD (AbsD src)));
 9479   format %{ "FNABS   $dst, $src \t// double" %}
 9480   size(4);
 9481   ins_encode %{
 9482     __ fnabs($dst$$FloatRegister, $src$$FloatRegister);
 9483   %}
 9484   ins_pipe(pipe_class_default);
 9485 %}
 9486 
 9487 // VM_Version::has_fsqrt() decides if this node will be used.
 9488 // Sqrt float double precision
 9489 instruct sqrtD_reg(regD dst, regD src) %{
 9490   match(Set dst (SqrtD src));
 9491   format %{ "FSQRT   $dst, $src" %}
 9492   size(4);
 9493   ins_encode %{
 9494     __ fsqrt($dst$$FloatRegister, $src$$FloatRegister);
 9495   %}
 9496   ins_pipe(pipe_class_default);
 9497 %}
 9498 
 9499 // Single-precision sqrt.
 9500 instruct sqrtF_reg(regF dst, regF src) %{
 9501   match(Set dst (SqrtF src));
 9502   predicate(VM_Version::has_fsqrts());
 9503   ins_cost(DEFAULT_COST);
 9504 
 9505   format %{ "FSQRTS  $dst, $src" %}
 9506   size(4);
 9507   ins_encode %{
 9508     __ fsqrts($dst$$FloatRegister, $src$$FloatRegister);
 9509   %}
 9510   ins_pipe(pipe_class_default);
 9511 %}
 9512 
 9513 instruct roundDouble_nop(regD dst) %{
 9514   match(Set dst (RoundDouble dst));
 9515   ins_cost(0);
 9516 
 9517   format %{ " -- \t// RoundDouble not needed - empty" %}
 9518   size(0);
 9519   // PPC results are already "rounded" (i.e., normal-format IEEE).
 9520   ins_encode( /*empty*/ );
 9521   ins_pipe(pipe_class_default);
 9522 %}
 9523 
 9524 instruct roundFloat_nop(regF dst) %{
 9525   match(Set dst (RoundFloat dst));
 9526   ins_cost(0);
 9527 
 9528   format %{ " -- \t// RoundFloat not needed - empty" %}
 9529   size(0);
 9530   // PPC results are already "rounded" (i.e., normal-format IEEE).
 9531   ins_encode( /*empty*/ );
 9532   ins_pipe(pipe_class_default);
 9533 %}
 9534 
 9535 
 9536 // Multiply-Accumulate
 9537 // src1 * src2 + src3
 9538 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
 9539   match(Set dst (FmaF src3 (Binary src1 src2)));
 9540 
 9541   format %{ "FMADDS  $dst, $src1, $src2, $src3" %}
 9542   size(4);
 9543   ins_encode %{
 9544     assert(UseFMA, "Needs FMA instructions support.");
 9545     __ fmadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9546   %}
 9547   ins_pipe(pipe_class_default);
 9548 %}
 9549 
 9550 // src1 * src2 + src3
 9551 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
 9552   match(Set dst (FmaD src3 (Binary src1 src2)));
 9553 
 9554   format %{ "FMADD   $dst, $src1, $src2, $src3" %}
 9555   size(4);
 9556   ins_encode %{
 9557     assert(UseFMA, "Needs FMA instructions support.");
 9558     __ fmadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9559   %}
 9560   ins_pipe(pipe_class_default);
 9561 %}
 9562 
 9563 // src1 * (-src2) + src3 = -(src1*src2-src3)
 9564 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 9565 instruct mnsubF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
 9566   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
 9567 
 9568   format %{ "FNMSUBS $dst, $src1, $src2, $src3" %}
 9569   size(4);
 9570   ins_encode %{
 9571     assert(UseFMA, "Needs FMA instructions support.");
 9572     __ fnmsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9573   %}
 9574   ins_pipe(pipe_class_default);
 9575 %}
 9576 
 9577 // src1 * (-src2) + src3 = -(src1*src2-src3)
 9578 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 9579 instruct mnsubD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
 9580   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
 9581 
 9582   format %{ "FNMSUB  $dst, $src1, $src2, $src3" %}
 9583   size(4);
 9584   ins_encode %{
 9585     assert(UseFMA, "Needs FMA instructions support.");
 9586     __ fnmsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9587   %}
 9588   ins_pipe(pipe_class_default);
 9589 %}
 9590 
 9591 // src1 * (-src2) - src3 = -(src1*src2+src3)
 9592 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 9593 instruct mnaddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
 9594   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
 9595 
 9596   format %{ "FNMADDS $dst, $src1, $src2, $src3" %}
 9597   size(4);
 9598   ins_encode %{
 9599     assert(UseFMA, "Needs FMA instructions support.");
 9600     __ fnmadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9601   %}
 9602   ins_pipe(pipe_class_default);
 9603 %}
 9604 
 9605 // src1 * (-src2) - src3 = -(src1*src2+src3)
 9606 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 9607 instruct mnaddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
 9608   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
 9609 
 9610   format %{ "FNMADD  $dst, $src1, $src2, $src3" %}
 9611   size(4);
 9612   ins_encode %{
 9613     assert(UseFMA, "Needs FMA instructions support.");
 9614     __ fnmadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9615   %}
 9616   ins_pipe(pipe_class_default);
 9617 %}
 9618 
 9619 // src1 * src2 - src3
 9620 instruct msubF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
 9621   match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
 9622 
 9623   format %{ "FMSUBS  $dst, $src1, $src2, $src3" %}
 9624   size(4);
 9625   ins_encode %{
 9626     assert(UseFMA, "Needs FMA instructions support.");
 9627     __ fmsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9628   %}
 9629   ins_pipe(pipe_class_default);
 9630 %}
 9631 
 9632 // src1 * src2 - src3
 9633 instruct msubD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
 9634   match(Set dst (FmaD (NegD src3) (Binary src1 src2)));
 9635 
 9636   format %{ "FMSUB   $dst, $src1, $src2, $src3" %}
 9637   size(4);
 9638   ins_encode %{
 9639     assert(UseFMA, "Needs FMA instructions support.");
 9640     __ fmsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 9641   %}
 9642   ins_pipe(pipe_class_default);
 9643 %}
 9644 
 9645 
 9646 //----------Logical Instructions-----------------------------------------------
 9647 
 9648 // And Instructions
 9649 
 9650 // Register And
 9651 instruct andI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9652   match(Set dst (AndI src1 src2));
 9653   format %{ "AND     $dst, $src1, $src2" %}
 9654   size(4);
 9655   ins_encode %{
 9656     __ andr($dst$$Register, $src1$$Register, $src2$$Register);
 9657   %}
 9658   ins_pipe(pipe_class_default);
 9659 %}
 9660 
 9661 // Left shifted Immediate And
 9662 instruct andI_reg_immIhi16(iRegIdst dst, iRegIsrc src1, immIhi16  src2, flagsRegCR0 cr0) %{
 9663   match(Set dst (AndI src1 src2));
 9664   effect(KILL cr0);
 9665   format %{ "ANDIS   $dst, $src1, $src2.hi" %}
 9666   size(4);
 9667   ins_encode %{
 9668     __ andis_($dst$$Register, $src1$$Register, (int)((unsigned short)(($src2$$constant & 0xFFFF0000) >> 16)));
 9669   %}
 9670   ins_pipe(pipe_class_default);
 9671 %}
 9672 
 9673 // Immediate And
 9674 instruct andI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2, flagsRegCR0 cr0) %{
 9675   match(Set dst (AndI src1 src2));
 9676   effect(KILL cr0);
 9677 
 9678   format %{ "ANDI    $dst, $src1, $src2" %}
 9679   size(4);
 9680   ins_encode %{
 9681     // FIXME: avoid andi_ ?
 9682     __ andi_($dst$$Register, $src1$$Register, $src2$$constant);
 9683   %}
 9684   ins_pipe(pipe_class_default);
 9685 %}
 9686 
 9687 // Immediate And where the immediate is a negative power of 2.
 9688 instruct andI_reg_immInegpow2(iRegIdst dst, iRegIsrc src1, immInegpow2 src2) %{
 9689   match(Set dst (AndI src1 src2));
 9690   format %{ "ANDWI   $dst, $src1, $src2" %}
 9691   size(4);
 9692   ins_encode %{
 9693     __ clrrdi($dst$$Register, $src1$$Register, log2i_exact(-(juint)$src2$$constant));
 9694   %}
 9695   ins_pipe(pipe_class_default);
 9696 %}
 9697 
 9698 instruct andI_reg_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immIpow2minus1 src2) %{
 9699   match(Set dst (AndI src1 src2));
 9700   format %{ "ANDWI   $dst, $src1, $src2" %}
 9701   size(4);
 9702   ins_encode %{
 9703     __ clrldi($dst$$Register, $src1$$Register, 64 - log2i_exact((juint)$src2$$constant + 1u));
 9704   %}
 9705   ins_pipe(pipe_class_default);
 9706 %}
 9707 
 9708 instruct andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %{
 9709   match(Set dst (AndI src1 src2));
 9710   predicate(UseRotateAndMaskInstructionsPPC64);
 9711   format %{ "ANDWI   $dst, $src1, $src2" %}
 9712   size(4);
 9713   ins_encode %{
 9714     int bitpos = 31 - log2i_exact((juint)$src2$$constant);
 9715     __ rlwinm($dst$$Register, $src1$$Register, 0, bitpos, bitpos);
 9716   %}
 9717   ins_pipe(pipe_class_default);
 9718 %}
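
      // For illustration, the three mask-style AndI forms above map as follows:
      //   x & -8 (negative power of 2)  -> CLRRDI $dst, $src1, 3   (clear the low 3 bits)
      //   x & 7  (power of 2 minus 1)   -> CLRLDI $dst, $src1, 61  (keep the low 3 bits)
      //   x & 16 (single power of 2)    -> RLWINM $dst, $src1, 0, 27, 27 (isolate bit 4)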
 9719 
 9720 // Register And Long
 9721 instruct andL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 9722   match(Set dst (AndL src1 src2));
 9723   ins_cost(DEFAULT_COST);
 9724 
 9725   format %{ "AND     $dst, $src1, $src2 \t// long" %}
 9726   size(4);
 9727   ins_encode %{
 9728     __ andr($dst$$Register, $src1$$Register, $src2$$Register);
 9729   %}
 9730   ins_pipe(pipe_class_default);
 9731 %}
 9732 
 9733 // Immediate And long
 9734 instruct andL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2, flagsRegCR0 cr0) %{
 9735   match(Set dst (AndL src1 src2));
 9736   effect(KILL cr0);
 9737 
 9738   format %{ "ANDI    $dst, $src1, $src2 \t// long" %}
 9739   size(4);
 9740   ins_encode %{
 9741     // FIXME: avoid andi_ ?
 9742     __ andi_($dst$$Register, $src1$$Register, $src2$$constant);
 9743   %}
 9744   ins_pipe(pipe_class_default);
 9745 %}
 9746 
 9747 // Immediate And Long where the immediate is a negative power of 2.
 9748 instruct andL_reg_immLnegpow2(iRegLdst dst, iRegLsrc src1, immLnegpow2 src2) %{
 9749   match(Set dst (AndL src1 src2));
 9750   format %{ "ANDDI   $dst, $src1, $src2" %}
 9751   size(4);
 9752   ins_encode %{
 9753     __ clrrdi($dst$$Register, $src1$$Register, log2i_exact(-(julong)$src2$$constant));
 9754   %}
 9755   ins_pipe(pipe_class_default);
 9756 %}
 9757 
 9758 instruct andL_reg_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
 9759   match(Set dst (AndL src1 src2));
 9760   format %{ "ANDDI   $dst, $src1, $src2" %}
 9761   size(4);
 9762   ins_encode %{
 9763     __ clrldi($dst$$Register, $src1$$Register, 64 - log2i_exact((julong)$src2$$constant + 1ull));
 9764   %}
 9765   ins_pipe(pipe_class_default);
 9766 %}
 9767 
 9768 // AndL + ConvL2I.
 9769 instruct convL2I_andL_reg_immLpow2minus1(iRegIdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
 9770   match(Set dst (ConvL2I (AndL src1 src2)));
 9771   ins_cost(DEFAULT_COST);
 9772 
 9773   format %{ "ANDDI   $dst, $src1, $src2 \t// long + l2i" %}
 9774   size(4);
 9775   ins_encode %{
 9776     __ clrldi($dst$$Register, $src1$$Register, 64 - log2i_exact((julong)$src2$$constant + 1ull));
 9777   %}
 9778   ins_pipe(pipe_class_default);
 9779 %}
 9780 
 9781 // Or Instructions
 9782 
 9783 // Register Or
 9784 instruct orI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9785   match(Set dst (OrI src1 src2));
 9786   format %{ "OR      $dst, $src1, $src2" %}
 9787   size(4);
 9788   ins_encode %{
 9789     __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
 9790   %}
 9791   ins_pipe(pipe_class_default);
 9792 %}
 9793 
 9794 // Expand does not work with above instruct. (??)
 9795 instruct orI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9796   // no match-rule
 9797   effect(DEF dst, USE src1, USE src2);
 9798   format %{ "OR      $dst, $src1, $src2" %}
 9799   size(4);
 9800   ins_encode %{
 9801     __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
 9802   %}
 9803   ins_pipe(pipe_class_default);
 9804 %}
 9805 
 9806 instruct tree_orI_orI_orI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
 9807   match(Set dst (OrI (OrI (OrI src1 src2) src3) src4));
 9808   ins_cost(DEFAULT_COST*3);
 9809 
 9810   expand %{
 9811     // FIXME: we should do this in the ideal world.
 9812     iRegIdst tmp1;
 9813     iRegIdst tmp2;
 9814     orI_reg_reg(tmp1, src1, src2);
 9815     orI_reg_reg_2(tmp2, src3, src4); // Adlc complains about orI_reg_reg.
 9816     orI_reg_reg(dst, tmp1, tmp2);
 9817   %}
 9818 %}
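
      // Re-associating ((src1 | src2) | src3) | src4 into (src1 | src2) | (src3 | src4)
      // lets the two inner ORs execute in parallel, shortening the dependency chain
      // from three ORs to two.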
 9819 
 9820 // Immediate Or
 9821 instruct orI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
 9822   match(Set dst (OrI src1 src2));
 9823   format %{ "ORI     $dst, $src1, $src2" %}
 9824   size(4);
 9825   ins_encode %{
 9826     __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
 9827   %}
 9828   ins_pipe(pipe_class_default);
 9829 %}
 9830 
 9831 // Register Or Long
 9832 instruct orL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 9833   match(Set dst (OrL src1 src2));
 9834   ins_cost(DEFAULT_COST);
 9835 
 9836   size(4);
 9837   format %{ "OR      $dst, $src1, $src2 \t// long" %}
 9838   ins_encode %{
 9839     __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
 9840   %}
 9841   ins_pipe(pipe_class_default);
 9842 %}
 9843 
 9844 // OrL + ConvL2I.
 9845 instruct orI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
 9846   match(Set dst (ConvL2I (OrL src1 src2)));
 9847   ins_cost(DEFAULT_COST);
 9848 
 9849   format %{ "OR      $dst, $src1, $src2 \t// long + l2i" %}
 9850   size(4);
 9851   ins_encode %{
 9852     __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
 9853   %}
 9854   ins_pipe(pipe_class_default);
 9855 %}
 9856 
 9857 // Immediate Or long
 9858 instruct orL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 con) %{
 9859   match(Set dst (OrL src1 con));
 9860   ins_cost(DEFAULT_COST);
 9861 
 9862   format %{ "ORI     $dst, $src1, $con \t// long" %}
 9863   size(4);
 9864   ins_encode %{
 9865     __ ori($dst$$Register, $src1$$Register, ($con$$constant) & 0xFFFF);
 9866   %}
 9867   ins_pipe(pipe_class_default);
 9868 %}
 9869 
 9870 // Xor Instructions
 9871 
 9872 // Register Xor
 9873 instruct xorI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9874   match(Set dst (XorI src1 src2));
 9875   format %{ "XOR     $dst, $src1, $src2" %}
 9876   size(4);
 9877   ins_encode %{
 9878     __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
 9879   %}
 9880   ins_pipe(pipe_class_default);
 9881 %}
 9882 
 9883 // Expand does not work with above instruct. (??)
 9884 instruct xorI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
 9885   // no match-rule
 9886   effect(DEF dst, USE src1, USE src2);
 9887   format %{ "XOR     $dst, $src1, $src2" %}
 9888   size(4);
 9889   ins_encode %{
 9890     __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
 9891   %}
 9892   ins_pipe(pipe_class_default);
 9893 %}
 9894 
 9895 instruct tree_xorI_xorI_xorI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
 9896   match(Set dst (XorI (XorI (XorI src1 src2) src3) src4));
 9897   ins_cost(DEFAULT_COST*3);
 9898 
 9899   expand %{
 9900     // FIXME: we should do this in the ideal world.
 9901     iRegIdst tmp1;
 9902     iRegIdst tmp2;
 9903     xorI_reg_reg(tmp1, src1, src2);
 9904     xorI_reg_reg_2(tmp2, src3, src4); // Adlc complains about xorI_reg_reg.
 9905     xorI_reg_reg(dst, tmp1, tmp2);
 9906   %}
 9907 %}
 9908 
 9909 // Immediate Xor
 9910 instruct xorI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
 9911   match(Set dst (XorI src1 src2));
 9912   format %{ "XORI    $dst, $src1, $src2" %}
 9913   size(4);
 9914   ins_encode %{
 9915     __ xori($dst$$Register, $src1$$Register, $src2$$constant);
 9916   %}
 9917   ins_pipe(pipe_class_default);
 9918 %}
 9919 
 9920 // Register Xor Long
 9921 instruct xorL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 9922   match(Set dst (XorL src1 src2));
 9923   ins_cost(DEFAULT_COST);
 9924 
 9925   format %{ "XOR     $dst, $src1, $src2 \t// long" %}
 9926   size(4);
 9927   ins_encode %{
 9928     __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
 9929   %}
 9930   ins_pipe(pipe_class_default);
 9931 %}
 9932 
 9933 // XorL + ConvL2I.
 9934 instruct xorI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
 9935   match(Set dst (ConvL2I (XorL src1 src2)));
 9936   ins_cost(DEFAULT_COST);
 9937 
 9938   format %{ "XOR     $dst, $src1, $src2 \t// long + l2i" %}
 9939   size(4);
 9940   ins_encode %{
 9941     __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
 9942   %}
 9943   ins_pipe(pipe_class_default);
 9944 %}
 9945 
 9946 // Immediate Xor Long
 9947 instruct xorL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2) %{
 9948   match(Set dst (XorL src1 src2));
 9949   ins_cost(DEFAULT_COST);
 9950 
 9951   format %{ "XORI    $dst, $src1, $src2 \t// long" %}
 9952   size(4);
 9953   ins_encode %{
 9954     __ xori($dst$$Register, $src1$$Register, $src2$$constant);
 9955   %}
 9956   ins_pipe(pipe_class_default);
 9957 %}
 9958 
 9959 instruct notI_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
 9960   match(Set dst (XorI src1 src2));
 9961   ins_cost(DEFAULT_COST);
 9962 
 9963   format %{ "NOT     $dst, $src1 ($src2)" %}
 9964   size(4);
 9965   ins_encode %{
 9966     __ nor($dst$$Register, $src1$$Register, $src1$$Register);
 9967   %}
 9968   ins_pipe(pipe_class_default);
 9969 %}
 9970 
 9971 instruct notL_reg(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
 9972   match(Set dst (XorL src1 src2));
 9973   ins_cost(DEFAULT_COST);
 9974 
 9975   format %{ "NOT     $dst, $src1 ($src2) \t// long" %}
 9976   size(4);
 9977   ins_encode %{
 9978     __ nor($dst$$Register, $src1$$Register, $src1$$Register);
 9979   %}
 9980   ins_pipe(pipe_class_default);
 9981 %}
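
      // Both NOT forms above are encoded as "nor $dst, $src1, $src1", since x NOR x == ~x;
      // the minus-one immediate only drives the match (x ^ -1 == ~x) and is not emitted.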
 9982 
 9983 // And-complement
 9984 instruct andcI_reg_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2, iRegIsrc src3) %{
 9985   match(Set dst (AndI (XorI src1 src2) src3));
 9986   ins_cost(DEFAULT_COST);
 9987 
 9988   format %{ "ANDW    $dst, xori($src1, $src2), $src3" %}
 9989   size(4);
 9990   ins_encode( enc_andc(dst, src3, src1) );
 9991   ins_pipe(pipe_class_default);
 9992 %}
 9993 
 9994 // And-complement
 9995 instruct andcL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
 9996   // no match-rule, false predicate
 9997   effect(DEF dst, USE src1, USE src2);
 9998   predicate(false);
 9999 
10000   format %{ "ANDC    $dst, $src1, $src2" %}
10001   size(4);
10002   ins_encode %{
10003     __ andc($dst$$Register, $src1$$Register, $src2$$Register);
10004   %}
10005   ins_pipe(pipe_class_default);
10006 %}
10007 
10008 //----------Moves between int/long and float/double----------------------------
10009 //
10010 // The following rules move values from int/long registers/stack-locations
10011 // to float/double registers/stack-locations and vice versa, without doing any
10012 // conversions. These rules are used to implement the bit-conversion methods
10013 // of java.lang.Float etc., e.g.
10014 //   int   floatToIntBits(float value)
10015 //   float intBitsToFloat(int bits)
10016 //
10017 // Notes on the implementation on ppc64:
10018 // For Power7 and earlier, the rules are limited to those which move between a
10019 // register and a stack-location, because we always have to go through memory
10020 // when moving between a float register and an integer register.
10021 // This restriction is removed in Power8 with the introduction of the mtfprd
10022 // and mffprd instructions.
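      //
      // For illustration: Float.floatToRawIntBits(1.0f) == 0x3f800000 and
      // Double.doubleToRawLongBits(1.0) == 0x3ff0000000000000L. On Power8 and later
      // such a move is a single register-to-register transfer (mtfprd/mffprd family);
      // on older processors it is a store/reload pair through a stack slot.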
10023 
10024 instruct moveL2D_reg(regD dst, iRegLsrc src) %{
10025   match(Set dst (MoveL2D src));
10026   predicate(VM_Version::has_mtfprd());
10027 
10028   format %{ "MTFPRD  $dst, $src" %}
10029   size(4);
10030   ins_encode %{
10031     __ mtfprd($dst$$FloatRegister, $src$$Register);
10032   %}
10033   ins_pipe(pipe_class_default);
10034 %}
10035 
10036 instruct moveI2D_reg(regD dst, iRegIsrc src) %{
10037   // no match-rule, false predicate
10038   effect(DEF dst, USE src);
10039   predicate(false);
10040 
10041   format %{ "MTFPRWA $dst, $src" %}
10042   size(4);
10043   ins_encode %{
10044     __ mtfprwa($dst$$FloatRegister, $src$$Register);
10045   %}
10046   ins_pipe(pipe_class_default);
10047 %}
10048 
10049 //---------- Chain stack slots between similar types --------
10050 
10051 // These are needed so that the rules below can match.
10052 
10053 // Load integer from stack slot
10054 instruct stkI_to_regI(iRegIdst dst, stackSlotI src) %{
10055   match(Set dst src);
10056   ins_cost(MEMORY_REF_COST);
10057 
10058   format %{ "LWZ     $dst, $src" %}
10059   size(4);
10060   ins_encode( enc_lwz(dst, src) );
10061   ins_pipe(pipe_class_memory);
10062 %}
10063 
10064 // Store integer to stack slot
10065 instruct regI_to_stkI(stackSlotI dst, iRegIsrc src) %{
10066   match(Set dst src);
10067   ins_cost(MEMORY_REF_COST);
10068 
10069   format %{ "STW     $src, $dst \t// stk" %}
10070   size(4);
10071   ins_encode( enc_stw(src, dst) ); // rs=rt
10072   ins_pipe(pipe_class_memory);
10073 %}
10074 
10075 // Load long from stack slot
10076 instruct stkL_to_regL(iRegLdst dst, stackSlotL src) %{
10077   match(Set dst src);
10078   ins_cost(MEMORY_REF_COST);
10079 
10080   format %{ "LD      $dst, $src \t// long" %}
10081   size(4);
10082   ins_encode( enc_ld(dst, src) );
10083   ins_pipe(pipe_class_memory);
10084 %}
10085 
10086 // Store long to stack slot
10087 instruct regL_to_stkL(stackSlotL dst, iRegLsrc src) %{
10088   match(Set dst src);
10089   ins_cost(MEMORY_REF_COST);
10090 
10091   format %{ "STD     $src, $dst \t// long" %}
10092   size(4);
10093   ins_encode( enc_std(src, dst) ); // rs=rt
10094   ins_pipe(pipe_class_memory);
10095 %}
10096 
10097 //----------Moves between int and float
10098 
10099 // Move float value from float stack-location to integer register.
10100 instruct moveF2I_stack_reg(iRegIdst dst, stackSlotF src) %{
10101   match(Set dst (MoveF2I src));
10102   ins_cost(MEMORY_REF_COST);
10103 
10104   format %{ "LWZ     $dst, $src \t// MoveF2I" %}
10105   size(4);
10106   ins_encode( enc_lwz(dst, src) );
10107   ins_pipe(pipe_class_memory);
10108 %}
10109 
10110 // Move float value from float register to integer stack-location.
10111 instruct moveF2I_reg_stack(stackSlotI dst, regF src) %{
10112   match(Set dst (MoveF2I src));
10113   ins_cost(MEMORY_REF_COST);
10114 
10115   format %{ "STFS    $src, $dst \t// MoveF2I" %}
10116   size(4);
10117   ins_encode( enc_stfs(src, dst) );
10118   ins_pipe(pipe_class_memory);
10119 %}
10120 
10121 // Move integer value from integer stack-location to float register.
10122 instruct moveI2F_stack_reg(regF dst, stackSlotI src) %{
10123   match(Set dst (MoveI2F src));
10124   ins_cost(MEMORY_REF_COST);
10125 
10126   format %{ "LFS     $dst, $src \t// MoveI2F" %}
10127   size(4);
10128   ins_encode %{
10129     int Idisp = $src$$disp + frame_slots_bias($src$$base, ra_);
10130     __ lfs($dst$$FloatRegister, Idisp, $src$$base$$Register);
10131   %}
10132   ins_pipe(pipe_class_memory);
10133 %}
10134 
10135 // Move integer value from integer register to float stack-location.
10136 instruct moveI2F_reg_stack(stackSlotF dst, iRegIsrc src) %{
10137   match(Set dst (MoveI2F src));
10138   ins_cost(MEMORY_REF_COST);
10139 
10140   format %{ "STW     $src, $dst \t// MoveI2F" %}
10141   size(4);
10142   ins_encode( enc_stw(src, dst) );
10143   ins_pipe(pipe_class_memory);
10144 %}
10145 
10146 //----------Moves between long and float
10147 
10148 instruct moveF2L_reg_stack(stackSlotL dst, regF src) %{
10149   // no match-rule, false predicate
10150   effect(DEF dst, USE src);
10151   predicate(false);
10152 
10153   format %{ "storeD  $src, $dst \t// STACK" %}
10154   size(4);
10155   ins_encode( enc_stfd(src, dst) );
10156   ins_pipe(pipe_class_default);
10157 %}
10158 
10159 //----------Moves between long and double
10160 
10161 // Move double value from double stack-location to long register.
10162 instruct moveD2L_stack_reg(iRegLdst dst, stackSlotD src) %{
10163   match(Set dst (MoveD2L src));
10164   ins_cost(MEMORY_REF_COST);
10165   size(4);
10166   format %{ "LD      $dst, $src \t// MoveD2L" %}
10167   ins_encode( enc_ld(dst, src) );
10168   ins_pipe(pipe_class_memory);
10169 %}
10170 
10171 // Move double value from double register to long stack-location.
10172 instruct moveD2L_reg_stack(stackSlotL dst, regD src) %{
10173   match(Set dst (MoveD2L src));
10174   effect(DEF dst, USE src);
10175   ins_cost(MEMORY_REF_COST);
10176 
10177   format %{ "STFD    $src, $dst \t// MoveD2L" %}
10178   size(4);
10179   ins_encode( enc_stfd(src, dst) );
10180   ins_pipe(pipe_class_memory);
10181 %}
10182 
10183 // Move long value from long stack-location to double register.
10184 instruct moveL2D_stack_reg(regD dst, stackSlotL src) %{
10185   match(Set dst (MoveL2D src));
10186   ins_cost(MEMORY_REF_COST);
10187 
10188   format %{ "LFD     $dst, $src \t// MoveL2D" %}
10189   size(4);
10190   ins_encode( enc_lfd(dst, src) );
10191   ins_pipe(pipe_class_memory);
10192 %}
10193 
10194 // Move long value from long register to double stack-location.
10195 instruct moveL2D_reg_stack(stackSlotD dst, iRegLsrc src) %{
10196   match(Set dst (MoveL2D src));
10197   ins_cost(MEMORY_REF_COST);
10198 
10199   format %{ "STD     $src, $dst \t// MoveL2D" %}
10200   size(4);
10201   ins_encode( enc_std(src, dst) );
10202   ins_pipe(pipe_class_memory);
10203 %}
10204 
10205 //----------Register Move Instructions-----------------------------------------
10206 
10207 // Replicate for Superword
10208 
10209 instruct moveReg(iRegLdst dst, iRegIsrc src) %{
10210   predicate(false);
10211   effect(DEF dst, USE src);
10212 
10213   format %{ "MR      $dst, $src \t// replicate " %}
10214   // variable size, 0 or 4.
10215   ins_encode %{
10216     __ mr_if_needed($dst$$Register, $src$$Register);
10217   %}
10218   ins_pipe(pipe_class_default);
10219 %}
10220 
10221 //----------Cast instructions (Java-level type cast)---------------------------
10222 
10223 // Cast Long to Pointer for unsafe natives.
10224 instruct castX2P(iRegPdst dst, iRegLsrc src) %{
10225   match(Set dst (CastX2P src));
10226 
10227   format %{ "MR      $dst, $src \t// Long->Ptr" %}
10228   // variable size, 0 or 4.
10229   ins_encode %{
10230     __ mr_if_needed($dst$$Register, $src$$Register);
10231   %}
  ins_pipe(pipe_class_default);
10233 %}
10234 
10235 // Cast Pointer to Long for unsafe natives.
10236 instruct castP2X(iRegLdst dst, iRegP_N2P src) %{
10237   match(Set dst (CastP2X src));
10238 
10239   format %{ "MR      $dst, $src \t// Ptr->Long" %}
10240   // variable size, 0 or 4.
10241   ins_encode %{
10242     __ mr_if_needed($dst$$Register, $src$$Register);
10243   %}
10244   ins_pipe(pipe_class_default);
10245 %}
10246 
10247 instruct castPP(iRegPdst dst) %{
10248   match(Set dst (CastPP dst));
10249   format %{ " -- \t// castPP of $dst" %}
10250   size(0);
10251   ins_encode( /*empty*/ );
10252   ins_pipe(pipe_class_default);
10253 %}
10254 
10255 instruct castII(iRegIdst dst) %{
10256   match(Set dst (CastII dst));
10257   format %{ " -- \t// castII of $dst" %}
10258   size(0);
10259   ins_encode( /*empty*/ );
10260   ins_pipe(pipe_class_default);
10261 %}
10262 
10263 instruct castLL(iRegLdst dst) %{
10264   match(Set dst (CastLL dst));
10265   format %{ " -- \t// castLL of $dst" %}
10266   size(0);
10267   ins_encode( /*empty*/ );
10268   ins_pipe(pipe_class_default);
10269 %}
10270 
10271 instruct castFF(regF dst) %{
10272   match(Set dst (CastFF dst));
10273   format %{ " -- \t// castFF of $dst" %}
10274   size(0);
10275   ins_encode( /*empty*/ );
10276   ins_pipe(pipe_class_default);
10277 %}
10278 
10279 instruct castDD(regD dst) %{
10280   match(Set dst (CastDD dst));
10281   format %{ " -- \t// castDD of $dst" %}
10282   size(0);
10283   ins_encode( /*empty*/ );
10284   ins_pipe(pipe_class_default);
10285 %}
10286 
10287 instruct castVV8(iRegLdst dst) %{
10288   match(Set dst (CastVV dst));
10289   format %{ " -- \t// castVV of $dst" %}
10290   size(0);
10291   ins_encode( /*empty*/ );
10292   ins_pipe(pipe_class_default);
10293 %}
10294 
10295 instruct castVV16(vecX dst) %{
10296   match(Set dst (CastVV dst));
10297   format %{ " -- \t// castVV of $dst" %}
10298   size(0);
10299   ins_encode( /*empty*/ );
10300   ins_pipe(pipe_class_default);
10301 %}
10302 
10303 instruct checkCastPP(iRegPdst dst) %{
10304   match(Set dst (CheckCastPP dst));
10305   format %{ " -- \t// checkcastPP of $dst" %}
10306   size(0);
10307   ins_encode( /*empty*/ );
10308   ins_pipe(pipe_class_default);
10309 %}
10310 
10311 //----------Convert instructions-----------------------------------------------
10312 
10313 // Convert to boolean.
10314 
10315 // int_to_bool(src) : { 1   if src != 0
10316 //                    { 0   else
10317 //
// strategy:
// 1) Count leading zeros of the 32-bit value src;
//    this yields 32 (0b10.0000) iff src == 0 and < 32 otherwise.
// 2) Shift the count 5 bits to the right; the result is 0b1 iff src == 0, 0b0 otherwise.
// 3) XOR the result with 1 to get 0b1 if src != 0 and 0b0 if src == 0.
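//
// For illustration, tracing the three steps:
//   src = 6 (0b110): cntlzw = 29, 29 >> 5 = 0, 0 ^ 1 = 1
//   src = 0:         cntlzw = 32, 32 >> 5 = 1, 1 ^ 1 = 0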
10323 
10324 // convI2Bool
10325 instruct convI2Bool_reg__cntlz_Ex(iRegIdst dst, iRegIsrc src) %{
10326   match(Set dst (Conv2B src));
10327   predicate(UseCountLeadingZerosInstructionsPPC64);
10328   ins_cost(DEFAULT_COST);
10329 
10330   expand %{
10331     immI shiftAmount %{ 0x5 %}
10332     uimmI16 mask %{ 0x1 %}
10333     iRegIdst tmp1;
10334     iRegIdst tmp2;
10335     countLeadingZerosI(tmp1, src);
10336     urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
10337     xorI_reg_uimm16(dst, tmp2, mask);
10338   %}
10339 %}
10340 
10341 instruct convI2Bool_reg__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx) %{
10342   match(Set dst (Conv2B src));
10343   effect(TEMP crx);
10344   predicate(!UseCountLeadingZerosInstructionsPPC64);
10345   ins_cost(DEFAULT_COST);
10346 
10347   format %{ "CMPWI   $crx, $src, #0 \t// convI2B"
10348             "LI      $dst, #0\n\t"
10349             "BEQ     $crx, done\n\t"
10350             "LI      $dst, #1\n"
10351             "done:" %}
10352   size(16);
10353   ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x0, 0x1) );
10354   ins_pipe(pipe_class_compare);
10355 %}
10356 
10357 // ConvI2B + XorI
10358 instruct xorI_convI2Bool_reg_immIvalue1__cntlz_Ex(iRegIdst dst, iRegIsrc src, immI_1 mask) %{
10359   match(Set dst (XorI (Conv2B src) mask));
10360   predicate(UseCountLeadingZerosInstructionsPPC64);
10361   ins_cost(DEFAULT_COST);
10362 
10363   expand %{
10364     immI shiftAmount %{ 0x5 %}
10365     iRegIdst tmp1;
10366     countLeadingZerosI(tmp1, src);
10367     urShiftI_reg_imm(dst, tmp1, shiftAmount);
10368   %}
10369 %}
10370 
10371 instruct xorI_convI2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI_1 mask) %{
10372   match(Set dst (XorI (Conv2B src) mask));
10373   effect(TEMP crx);
10374   predicate(!UseCountLeadingZerosInstructionsPPC64);
10375   ins_cost(DEFAULT_COST);
10376 
10377   format %{ "CMPWI   $crx, $src, #0 \t// Xor(convI2B($src), $mask)"
10378             "LI      $dst, #1\n\t"
10379             "BEQ     $crx, done\n\t"
10380             "LI      $dst, #0\n"
10381             "done:" %}
10382   size(16);
10383   ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x1, 0x0) );
10384   ins_pipe(pipe_class_compare);
10385 %}
10386 
10387 // AndI 0b0..010..0 + ConvI2B
10388 instruct convI2Bool_andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src, immIpowerOf2 mask) %{
10389   match(Set dst (Conv2B (AndI src mask)));
10390   predicate(UseRotateAndMaskInstructionsPPC64);
10391   ins_cost(DEFAULT_COST);
10392 
10393   format %{ "RLWINM  $dst, $src, $mask \t// convI2B(AndI($src, $mask))" %}
10394   size(4);
10395   ins_encode %{
10396     __ rlwinm($dst$$Register, $src$$Register, 32 - log2i_exact((juint)($mask$$constant)), 31, 31);
10397   %}
10398   ins_pipe(pipe_class_default);
10399 %}
10400 
10401 // Convert pointer to boolean.
10402 //
10403 // ptr_to_bool(src) : { 1   if src != 0
10404 //                    { 0   else
10405 //
// strategy:
// 1) Count leading zeros of the 64-bit value src;
//    this yields 64 (0b100.0000) iff src == 0 and < 64 otherwise.
// 2) Shift the count 6 bits to the right; the result is 0b1 iff src == 0, 0b0 otherwise.
// 3) XOR the result with 1 to get 0b1 if src != 0 and 0b0 if src == 0.
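//
// For illustration: src = 0 gives cntlzd = 64, 64 >> 6 = 1, 1 ^ 1 = 0;
// any nonzero src gives cntlzd < 64, a shift result of 0, and a final value of 1.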
10411 
10412 // ConvP2B
10413 instruct convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src) %{
10414   match(Set dst (Conv2B src));
10415   predicate(UseCountLeadingZerosInstructionsPPC64);
10416   ins_cost(DEFAULT_COST);
10417 
10418   expand %{
10419     immI shiftAmount %{ 0x6 %}
10420     uimmI16 mask %{ 0x1 %}
10421     iRegIdst tmp1;
10422     iRegIdst tmp2;
10423     countLeadingZerosP(tmp1, src);
10424     urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
10425     xorI_reg_uimm16(dst, tmp2, mask);
10426   %}
10427 %}
10428 
10429 instruct convP2Bool_reg__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx) %{
10430   match(Set dst (Conv2B src));
10431   effect(TEMP crx);
10432   predicate(!UseCountLeadingZerosInstructionsPPC64);
10433   ins_cost(DEFAULT_COST);
10434 
10435   format %{ "CMPDI   $crx, $src, #0 \t// convP2B"
10436             "LI      $dst, #0\n\t"
10437             "BEQ     $crx, done\n\t"
10438             "LI      $dst, #1\n"
10439             "done:" %}
10440   size(16);
10441   ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x0, 0x1) );
10442   ins_pipe(pipe_class_compare);
10443 %}
10444 
10445 // ConvP2B + XorI
10446 instruct xorI_convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src, immI_1 mask) %{
10447   match(Set dst (XorI (Conv2B src) mask));
10448   predicate(UseCountLeadingZerosInstructionsPPC64);
10449   ins_cost(DEFAULT_COST);
10450 
10451   expand %{
10452     immI shiftAmount %{ 0x6 %}
10453     iRegIdst tmp1;
10454     countLeadingZerosP(tmp1, src);
10455     urShiftI_reg_imm(dst, tmp1, shiftAmount);
10456   %}
10457 %}
10458 
10459 instruct xorI_convP2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx, immI_1 mask) %{
10460   match(Set dst (XorI (Conv2B src) mask));
10461   effect(TEMP crx);
10462   predicate(!UseCountLeadingZerosInstructionsPPC64);
10463   ins_cost(DEFAULT_COST);
10464 
10465   format %{ "CMPDI   $crx, $src, #0 \t// XorI(convP2B($src), $mask)"
10466             "LI      $dst, #1\n\t"
10467             "BEQ     $crx, done\n\t"
10468             "LI      $dst, #0\n"
10469             "done:" %}
10470   size(16);
10471   ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x1, 0x0) );
10472   ins_pipe(pipe_class_compare);
10473 %}
10474 
10475 // if src1 < src2, return -1 else return 0
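// A worked example for the expansion below: both operands are sign-extended so the
// subtraction happens in 64 bits; e.g. src1 = -2, src2 = 0x7FFFFFFF gives a difference
// of -0x80000001, which needs 33 bits. Its 64-bit sign is negative, so dst becomes -1,
// whereas a 32-bit subtraction would overflow and report the wrong sign.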
10476 instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
10477   match(Set dst (CmpLTMask src1 src2));
10478   ins_cost(DEFAULT_COST*4);
10479 
10480   expand %{
10481     iRegLdst src1s;
10482     iRegLdst src2s;
10483     iRegLdst diff;
10484     convI2L_reg(src1s, src1); // Ensure proper sign extension.
10485     convI2L_reg(src2s, src2); // Ensure proper sign extension.
10486     subL_reg_reg(diff, src1s, src2s);
    // The difference may need up to 33 bits, therefore we take the sign of the 64-bit result (signmaskL).
10488     signmask64I_regL(dst, diff);
10489   %}
10490 %}
10491 
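// When src2 is the constant 0, 'src1 < 0' is just the sign bit of src1, so an arithmetic
// shift right by 31 replicates it: e.g. src1 = -7 -> dst = -1, src1 = 7 -> dst = 0.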
10492 instruct cmpLTMask_reg_immI0(iRegIdst dst, iRegIsrc src1, immI_0 src2) %{
10493   match(Set dst (CmpLTMask src1 src2)); // if src1 < src2, return -1 else return 0
10494   format %{ "SRAWI   $dst, $src1, $src2 \t// CmpLTMask" %}
10495   size(4);
10496   ins_encode %{
10497     __ srawi($dst$$Register, $src1$$Register, 0x1f);
10498   %}
10499   ins_pipe(pipe_class_default);
10500 %}
10501 
10502 //----------Arithmetic Conversion Instructions---------------------------------
10503 
10504 // Convert to Byte  -- nop
10505 // Convert to Short -- nop
10506 
10507 // Convert to Int
10508 
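// LShiftI 24 + RShiftI 24 converts byte to int: C2 parses the i2b bytecode into this
// shift pair, which matches a single EXTSB here.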
10509 instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
10510   match(Set dst (RShiftI (LShiftI src amount) amount));
10511   format %{ "EXTSB   $dst, $src \t// byte->int" %}
10512   size(4);
10513   ins_encode %{
10514     __ extsb($dst$$Register, $src$$Register);
10515   %}
10516   ins_pipe(pipe_class_default);
10517 %}
10518 
10519 instruct extsh(iRegIdst dst, iRegIsrc src) %{
10520   effect(DEF dst, USE src);
10521 
10522   size(4);
10523   ins_encode %{
10524     __ extsh($dst$$Register, $src$$Register);
10525   %}
10526   ins_pipe(pipe_class_default);
10527 %}
10528 
10529 // LShiftI 16 + RShiftI 16 converts short to int.
10530 instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
10531   match(Set dst (RShiftI (LShiftI src amount) amount));
10532   format %{ "EXTSH   $dst, $src \t// short->int" %}
10533   size(4);
10534   ins_encode %{
10535     __ extsh($dst$$Register, $src$$Register);
10536   %}
10537   ins_pipe(pipe_class_default);
10538 %}
10539 
10540 // ConvL2I + ConvI2L: Sign extend int in long register.
10541 instruct sxtI_L2L_reg(iRegLdst dst, iRegLsrc src) %{
10542   match(Set dst (ConvI2L (ConvL2I src)));
10543 
10544   format %{ "EXTSW   $dst, $src \t// long->long" %}
10545   size(4);
10546   ins_encode %{
10547     __ extsw($dst$$Register, $src$$Register);
10548   %}
10549   ins_pipe(pipe_class_default);
10550 %}
10551 
10552 instruct convL2I_reg(iRegIdst dst, iRegLsrc src) %{
10553   match(Set dst (ConvL2I src));
10554   format %{ "MR      $dst, $src \t// long->int" %}
10555   // variable size, 0 or 4
10556   ins_encode %{
10557     __ mr_if_needed($dst$$Register, $src$$Register);
10558   %}
10559   ins_pipe(pipe_class_default);
10560 %}
10561 
10562 instruct convD2IRaw_regD(regD dst, regD src) %{
10563   // no match-rule, false predicate
10564   effect(DEF dst, USE src);
10565   predicate(false);
10566 
10567   format %{ "FCTIWZ $dst, $src \t// convD2I, $src != NaN" %}
10568   size(4);
10569   ins_encode %{
10570     __ fctiwz($dst$$FloatRegister, $src$$FloatRegister);
10571   %}
10572   ins_pipe(pipe_class_default);
10573 %}
10574 
10575 instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
10576   // no match-rule, false predicate
10577   effect(DEF dst, USE crx, USE src);
10578   predicate(false);
10579 
10580   ins_variable_size_depending_on_alignment(true);
10581 
10582   format %{ "cmovI   $crx, $dst, $src" %}
10583   // Worst case is branch + move + stop, no stop without scheduler.
10584   size(8);
10585   ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
10586   ins_pipe(pipe_class_default);
10587 %}
10588 
10589 instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
10590   // no match-rule, false predicate
10591   effect(DEF dst, USE crx, USE src);
10592   predicate(false);
10593 
10594   ins_variable_size_depending_on_alignment(true);
10595 
10596   format %{ "cmovI   $crx, $dst, $src" %}
10597   // Worst case is branch + move + stop, no stop without scheduler.
10598   size(8);
10599   ins_encode( enc_cmove_bso_reg(dst, crx, src) );
10600   ins_pipe(pipe_class_default);
10601 %}
10602 
10603 instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, stackSlotL mem) %{
10604   // no match-rule, false predicate
10605   effect(DEF dst, USE crx, USE mem);
10606   predicate(false);
10607 
10608   format %{ "CmovI   $dst, $crx, $mem \t// postalloc expanded" %}
10609   postalloc_expand %{
10610     //
10611     // replaces
10612     //
10613     //   region  dst  crx  mem
10614     //    \       |    |   /
10615     //     dst=cmovI_bso_stackSlotL_conLvalue0
10616     //
10617     // with
10618     //
10619     //   region  dst
10620     //    \       /
10621     //     dst=loadConI16(0)
10622     //      |
10623     //      ^  region  dst  crx  mem
10624     //      |   \       |    |    /
10625     //      dst=cmovI_bso_stackSlotL
10626     //
10627 
10628     // Create new nodes.
10629     MachNode *m1 = new loadConI16Node();
10630     MachNode *m2 = new cmovI_bso_stackSlotLNode();
10631 
10632     // inputs for new nodes
10633     m1->add_req(n_region);
10634     m2->add_req(n_region, n_crx, n_mem);
10635 
10636     // precedences for new nodes
10637     m2->add_prec(m1);
10638 
10639     // operands for new nodes
10640     m1->_opnds[0] = op_dst;
10641     m1->_opnds[1] = new immI16Oper(0);
10642 
10643     m2->_opnds[0] = op_dst;
10644     m2->_opnds[1] = op_crx;
10645     m2->_opnds[2] = op_mem;
10646 
10647     // registers for new nodes
10648     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10649     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10650 
10651     // Insert new nodes.
10652     nodes->push(m1);
10653     nodes->push(m2);
10654   %}
10655 %}
10656 
10657 instruct cmovI_bso_reg_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, regD src) %{
10658   // no match-rule, false predicate
10659   effect(DEF dst, USE crx, USE src);
10660   predicate(false);
10661 
10662   format %{ "CmovI   $dst, $crx, $src \t// postalloc expanded" %}
10663   postalloc_expand %{
10664     //
10665     // replaces
10666     //
10667     //   region  dst  crx  src
10668     //    \       |    |   /
10669     //     dst=cmovI_bso_reg_conLvalue0
10670     //
10671     // with
10672     //
10673     //   region  dst
10674     //    \       /
10675     //     dst=loadConI16(0)
10676     //      |
10677     //      ^  region  dst  crx  src
10678     //      |   \       |    |    /
10679     //      dst=cmovI_bso_reg
10680     //
10681 
10682     // Create new nodes.
10683     MachNode *m1 = new loadConI16Node();
10684     MachNode *m2 = new cmovI_bso_regNode();
10685 
10686     // inputs for new nodes
10687     m1->add_req(n_region);
10688     m2->add_req(n_region, n_crx, n_src);
10689 
10690     // precedences for new nodes
10691     m2->add_prec(m1);
10692 
10693     // operands for new nodes
10694     m1->_opnds[0] = op_dst;
10695     m1->_opnds[1] = new immI16Oper(0);
10696 
10697     m2->_opnds[0] = op_dst;
10698     m2->_opnds[1] = op_crx;
10699     m2->_opnds[2] = op_src;
10700 
10701     // registers for new nodes
10702     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10703     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10704 
10705     // Insert new nodes.
10706     nodes->push(m1);
10707     nodes->push(m2);
10708   %}
10709 %}
10710 
10711 // Double to Int conversion, NaN is mapped to 0.
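// The conversion is speculated: FCTIWZ yields 0x8000_0000 for NaN inputs, but Java
// requires (int)NaN == 0, so the unordered compare and the conditional move force 0 for NaN.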
10712 instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
10713   match(Set dst (ConvD2I src));
10714   predicate(!VM_Version::has_mtfprd());
10715   ins_cost(DEFAULT_COST);
10716 
10717   expand %{
10718     regD tmpD;
10719     stackSlotL tmpS;
10720     flagsReg crx;
10721     cmpDUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
    convD2IRaw_regD(tmpD, src);                         // Convert double to int (speculated).
    moveD2L_reg_stack(tmpS, tmpD);                      // Store double to stack (speculated).
10724     cmovI_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
10725   %}
10726 %}
10727 
10728 // Double to Int conversion, NaN is mapped to 0. Special version for Power8.
10729 instruct convD2I_reg_mffprd_ExEx(iRegIdst dst, regD src) %{
10730   match(Set dst (ConvD2I src));
10731   predicate(VM_Version::has_mtfprd());
10732   ins_cost(DEFAULT_COST);
10733 
10734   expand %{
10735     regD tmpD;
10736     flagsReg crx;
10737     cmpDUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
    convD2IRaw_regD(tmpD, src);                         // Convert double to int (speculated).
10739     cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpD);        // Cmove based on NaN check.
10740   %}
10741 %}
10742 
10743 instruct convF2IRaw_regF(regF dst, regF src) %{
10744   // no match-rule, false predicate
10745   effect(DEF dst, USE src);
10746   predicate(false);
10747 
10748   format %{ "FCTIWZ $dst, $src \t// convF2I, $src != NaN" %}
10749   size(4);
10750   ins_encode %{
10751     __ fctiwz($dst$$FloatRegister, $src$$FloatRegister);
10752   %}
10753   ins_pipe(pipe_class_default);
10754 %}
10755 
10756 // Float to Int conversion, NaN is mapped to 0.
10757 instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
10758   match(Set dst (ConvF2I src));
10759   predicate(!VM_Version::has_mtfprd());
10760   ins_cost(DEFAULT_COST);
10761 
10762   expand %{
10763     regF tmpF;
10764     stackSlotL tmpS;
10765     flagsReg crx;
10766     cmpFUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
10767     convF2IRaw_regF(tmpF, src);                         // Convert float to int (speculated).
10768     moveF2L_reg_stack(tmpS, tmpF);                      // Store float to stack (speculated).
10769     cmovI_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
10770   %}
10771 %}
10772 
10773 // Float to Int conversion, NaN is mapped to 0. Special version for Power8.
10774 instruct convF2I_regF_mffprd_ExEx(iRegIdst dst, regF src) %{
10775   match(Set dst (ConvF2I src));
10776   predicate(VM_Version::has_mtfprd());
10777   ins_cost(DEFAULT_COST);
10778 
10779   expand %{
10780     regF tmpF;
10781     flagsReg crx;
10782     cmpFUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
10783     convF2IRaw_regF(tmpF, src);                         // Convert float to int (speculated).
10784     cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpF);        // Cmove based on NaN check.
10785   %}
10786 %}
10787 
10788 // Convert to Long
10789 
10790 instruct convI2L_reg(iRegLdst dst, iRegIsrc src) %{
10791   match(Set dst (ConvI2L src));
10792   format %{ "EXTSW   $dst, $src \t// int->long" %}
10793   size(4);
10794   ins_encode %{
10795     __ extsw($dst$$Register, $src$$Register);
10796   %}
10797   ins_pipe(pipe_class_default);
10798 %}
10799 
10800 // Zero-extend: convert unsigned int to long (convUI2L).
10801 instruct zeroExtendL_regI(iRegLdst dst, iRegIsrc src, immL_32bits mask) %{
10802   match(Set dst (AndL (ConvI2L src) mask));
10803   ins_cost(DEFAULT_COST);
10804 
10805   format %{ "CLRLDI  $dst, $src, #32 \t// zero-extend int to long" %}
10806   size(4);
10807   ins_encode %{
10808     __ clrldi($dst$$Register, $src$$Register, 32);
10809   %}
10810   ins_pipe(pipe_class_default);
10811 %}
10812 
10813 // Zero-extend: convert unsigned int to long in long register.
10814 instruct zeroExtendL_regL(iRegLdst dst, iRegLsrc src, immL_32bits mask) %{
10815   match(Set dst (AndL src mask));
10816   ins_cost(DEFAULT_COST);
10817 
10818   format %{ "CLRLDI  $dst, $src, #32 \t// zero-extend int to long" %}
10819   size(4);
10820   ins_encode %{
10821     __ clrldi($dst$$Register, $src$$Register, 32);
10822   %}
10823   ins_pipe(pipe_class_default);
10824 %}
10825 
10826 instruct convF2LRaw_regF(regF dst, regF src) %{
10827   // no match-rule, false predicate
10828   effect(DEF dst, USE src);
10829   predicate(false);
10830 
10831   format %{ "FCTIDZ $dst, $src \t// convF2L, $src != NaN" %}
10832   size(4);
10833   ins_encode %{
10834     __ fctidz($dst$$FloatRegister, $src$$FloatRegister);
10835   %}
10836   ins_pipe(pipe_class_default);
10837 %}
10838 
10839 instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
10840   // no match-rule, false predicate
10841   effect(DEF dst, USE crx, USE src);
10842   predicate(false);
10843 
10844   ins_variable_size_depending_on_alignment(true);
10845 
10846   format %{ "cmovL   $crx, $dst, $src" %}
10847   // Worst case is branch + move + stop, no stop without scheduler.
10848   size(8);
10849   ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
10850   ins_pipe(pipe_class_default);
10851 %}
10852 
10853 instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
10854   // no match-rule, false predicate
10855   effect(DEF dst, USE crx, USE src);
10856   predicate(false);
10857 
10858   ins_variable_size_depending_on_alignment(true);
10859 
10860   format %{ "cmovL   $crx, $dst, $src" %}
10861   // Worst case is branch + move + stop, no stop without scheduler.
10862   size(8);
10863   ins_encode( enc_cmove_bso_reg(dst, crx, src) );
10864   ins_pipe(pipe_class_default);
10865 %}
10866 
10867 instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, stackSlotL mem) %{
10868   // no match-rule, false predicate
10869   effect(DEF dst, USE crx, USE mem);
10870   predicate(false);
10871 
10872   format %{ "CmovL   $dst, $crx, $mem \t// postalloc expanded" %}
10873   postalloc_expand %{
10874     //
10875     // replaces
10876     //
10877     //   region  dst  crx  mem
10878     //    \       |    |   /
10879     //     dst=cmovL_bso_stackSlotL_conLvalue0
10880     //
10881     // with
10882     //
10883     //   region  dst
10884     //    \       /
10885     //     dst=loadConL16(0)
10886     //      |
10887     //      ^  region  dst  crx  mem
10888     //      |   \       |    |    /
10889     //      dst=cmovL_bso_stackSlotL
10890     //
10891 
10892     // Create new nodes.
10893     MachNode *m1 = new loadConL16Node();
10894     MachNode *m2 = new cmovL_bso_stackSlotLNode();
10895 
10896     // inputs for new nodes
10897     m1->add_req(n_region);
10898     m2->add_req(n_region, n_crx, n_mem);
10899     m2->add_prec(m1);
10900 
10901     // operands for new nodes
10902     m1->_opnds[0] = op_dst;
10903     m1->_opnds[1] = new immL16Oper(0);
10904     m2->_opnds[0] = op_dst;
10905     m2->_opnds[1] = op_crx;
10906     m2->_opnds[2] = op_mem;
10907 
10908     // registers for new nodes
10909     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10910     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10911 
10912     // Insert new nodes.
10913     nodes->push(m1);
10914     nodes->push(m2);
10915   %}
10916 %}
10917 
10918 instruct cmovL_bso_reg_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, regD src) %{
10919   // no match-rule, false predicate
10920   effect(DEF dst, USE crx, USE src);
10921   predicate(false);
10922 
10923   format %{ "CmovL   $dst, $crx, $src \t// postalloc expanded" %}
10924   postalloc_expand %{
10925     //
10926     // replaces
10927     //
10928     //   region  dst  crx  src
10929     //    \       |    |   /
10930     //     dst=cmovL_bso_reg_conLvalue0
10931     //
10932     // with
10933     //
10934     //   region  dst
10935     //    \       /
10936     //     dst=loadConL16(0)
10937     //      |
10938     //      ^  region  dst  crx  src
10939     //      |   \       |    |    /
10940     //      dst=cmovL_bso_reg
10941     //
10942 
10943     // Create new nodes.
10944     MachNode *m1 = new loadConL16Node();
10945     MachNode *m2 = new cmovL_bso_regNode();
10946 
10947     // inputs for new nodes
10948     m1->add_req(n_region);
10949     m2->add_req(n_region, n_crx, n_src);
10950     m2->add_prec(m1);
10951 
10952     // operands for new nodes
10953     m1->_opnds[0] = op_dst;
10954     m1->_opnds[1] = new immL16Oper(0);
10955     m2->_opnds[0] = op_dst;
10956     m2->_opnds[1] = op_crx;
10957     m2->_opnds[2] = op_src;
10958 
10959     // registers for new nodes
10960     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10961     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
10962 
10963     // Insert new nodes.
10964     nodes->push(m1);
10965     nodes->push(m2);
10966   %}
10967 %}
10968 
10969 // Float to Long conversion, NaN is mapped to 0.
10970 instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
10971   match(Set dst (ConvF2L src));
10972   predicate(!VM_Version::has_mtfprd());
10973   ins_cost(DEFAULT_COST);
10974 
10975   expand %{
10976     regF tmpF;
10977     stackSlotL tmpS;
10978     flagsReg crx;
10979     cmpFUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
10980     convF2LRaw_regF(tmpF, src);                         // Convert float to long (speculated).
10981     moveF2L_reg_stack(tmpS, tmpF);                      // Store float to stack (speculated).
10982     cmovL_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
10983   %}
10984 %}
10985 
10986 // Float to Long conversion, NaN is mapped to 0. Special version for Power8.
10987 instruct convF2L_reg_mffprd_ExEx(iRegLdst dst, regF src) %{
10988   match(Set dst (ConvF2L src));
10989   predicate(VM_Version::has_mtfprd());
10990   ins_cost(DEFAULT_COST);
10991 
10992   expand %{
10993     regF tmpF;
10994     flagsReg crx;
10995     cmpFUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
10996     convF2LRaw_regF(tmpF, src);                         // Convert float to long (speculated).
10997     cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpF);        // Cmove based on NaN check.
10998   %}
10999 %}
11000 
11001 instruct convD2LRaw_regD(regD dst, regD src) %{
11002   // no match-rule, false predicate
11003   effect(DEF dst, USE src);
11004   predicate(false);
11005 
11006   format %{ "FCTIDZ $dst, $src \t// convD2L $src != NaN" %}
11007   size(4);
11008   ins_encode %{
11009     __ fctidz($dst$$FloatRegister, $src$$FloatRegister);
11010   %}
11011   ins_pipe(pipe_class_default);
11012 %}
11013 
11014 // Double to Long conversion, NaN is mapped to 0.
11015 instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
11016   match(Set dst (ConvD2L src));
11017   predicate(!VM_Version::has_mtfprd());
11018   ins_cost(DEFAULT_COST);
11019 
11020   expand %{
11021     regD tmpD;
11022     stackSlotL tmpS;
11023     flagsReg crx;
11024     cmpDUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
    convD2LRaw_regD(tmpD, src);                         // Convert double to long (speculated).
    moveD2L_reg_stack(tmpS, tmpD);                      // Store double to stack (speculated).
11027     cmovL_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
11028   %}
11029 %}
11030 
11031 // Double to Long conversion, NaN is mapped to 0. Special version for Power8.
11032 instruct convD2L_reg_mffprd_ExEx(iRegLdst dst, regD src) %{
11033   match(Set dst (ConvD2L src));
11034   predicate(VM_Version::has_mtfprd());
11035   ins_cost(DEFAULT_COST);
11036 
11037   expand %{
11038     regD tmpD;
11039     flagsReg crx;
11040     cmpDUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
    convD2LRaw_regD(tmpD, src);                         // Convert double to long (speculated).
11042     cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpD);        // Cmove based on NaN check.
11043   %}
11044 %}
11045 
11046 // Convert to Float
11047 
11048 // Placed here as needed in expand.
11049 instruct convL2DRaw_regD(regD dst, regD src) %{
11050   // no match-rule, false predicate
11051   effect(DEF dst, USE src);
11052   predicate(false);
11053 
11054   format %{ "FCFID $dst, $src \t// convL2D" %}
11055   size(4);
11056   ins_encode %{
11057     __ fcfid($dst$$FloatRegister, $src$$FloatRegister);
11058   %}
11059   ins_pipe(pipe_class_default);
11060 %}
11061 
11062 // Placed here as needed in expand.
11063 instruct convD2F_reg(regF dst, regD src) %{
11064   match(Set dst (ConvD2F src));
11065   format %{ "FRSP    $dst, $src \t// convD2F" %}
11066   size(4);
11067   ins_encode %{
11068     __ frsp($dst$$FloatRegister, $src$$FloatRegister);
11069   %}
11070   ins_pipe(pipe_class_default);
11071 %}
11072 
11073 // Integer to Float conversion.
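// Without a direct GPR-to-FPR move (mtfprd, available from Power8 on), the int is
// sign-extended to a long, stored to the stack, reloaded into an FP register, and
// converted with FCFID; FRSP then rounds the result to single precision.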
11074 instruct convI2F_ireg_Ex(regF dst, iRegIsrc src) %{
11075   match(Set dst (ConvI2F src));
11076   predicate(!VM_Version::has_fcfids());
11077   ins_cost(DEFAULT_COST);
11078 
11079   expand %{
11080     iRegLdst tmpL;
11081     stackSlotL tmpS;
11082     regD tmpD;
11083     regD tmpD2;
    convI2L_reg(tmpL, src);              // Sign-extend int to long.
11085     regL_to_stkL(tmpS, tmpL);            // Store long to stack.
11086     moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
11087     convL2DRaw_regD(tmpD2, tmpD);        // Convert to double.
11088     convD2F_reg(dst, tmpD2);             // Convert double to float.
11089   %}
11090 %}
11091 
11092 instruct convL2FRaw_regF(regF dst, regD src) %{
11093   // no match-rule, false predicate
11094   effect(DEF dst, USE src);
11095   predicate(false);
11096 
11097   format %{ "FCFIDS $dst, $src \t// convL2F" %}
11098   size(4);
11099   ins_encode %{
11100     __ fcfids($dst$$FloatRegister, $src$$FloatRegister);
11101   %}
11102   ins_pipe(pipe_class_default);
11103 %}
11104 
11105 // Integer to Float conversion. Special version for Power7.
11106 instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
11107   match(Set dst (ConvI2F src));
11108   predicate(VM_Version::has_fcfids() && !VM_Version::has_mtfprd());
11109   ins_cost(DEFAULT_COST);
11110 
11111   expand %{
11112     iRegLdst tmpL;
11113     stackSlotL tmpS;
11114     regD tmpD;
    convI2L_reg(tmpL, src);              // Sign-extend int to long.
11116     regL_to_stkL(tmpS, tmpL);            // Store long to stack.
11117     moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
11118     convL2FRaw_regF(dst, tmpD);          // Convert to float.
11119   %}
11120 %}
11121 
11122 // Integer to Float conversion. Special version for Power8.
11123 instruct convI2F_ireg_mtfprd_Ex(regF dst, iRegIsrc src) %{
11124   match(Set dst (ConvI2F src));
11125   predicate(VM_Version::has_fcfids() && VM_Version::has_mtfprd());
11126   ins_cost(DEFAULT_COST);
11127 
11128   expand %{
11129     regD tmpD;
11130     moveI2D_reg(tmpD, src);
11131     convL2FRaw_regF(dst, tmpD);          // Convert to float.
11132   %}
11133 %}
11134 
11135 // L2F to avoid runtime call.
11136 instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
11137   match(Set dst (ConvL2F src));
11138   predicate(VM_Version::has_fcfids() && !VM_Version::has_mtfprd());
11139   ins_cost(DEFAULT_COST);
11140 
11141   expand %{
11142     stackSlotL tmpS;
11143     regD tmpD;
11144     regL_to_stkL(tmpS, src);             // Store long to stack.
11145     moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
11146     convL2FRaw_regF(dst, tmpD);          // Convert to float.
11147   %}
11148 %}
11149 
11150 // L2F to avoid runtime call.  Special version for Power8.
11151 instruct convL2F_ireg_mtfprd_Ex(regF dst, iRegLsrc src) %{
11152   match(Set dst (ConvL2F src));
11153   predicate(VM_Version::has_fcfids() && VM_Version::has_mtfprd());
11154   ins_cost(DEFAULT_COST);
11155 
11156   expand %{
11157     regD tmpD;
11158     moveL2D_reg(tmpD, src);
11159     convL2FRaw_regF(dst, tmpD);          // Convert to float.
11160   %}
11161 %}
11162 
11163 // Moved up as used in expand.
11164 //instruct convD2F_reg(regF dst, regD src) %{%}
11165 
11166 // Convert to Double
11167 
11168 // Integer to Double conversion.
11169 instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
11170   match(Set dst (ConvI2D src));
11171   predicate(!VM_Version::has_mtfprd());
11172   ins_cost(DEFAULT_COST);
11173 
11174   expand %{
11175     iRegLdst tmpL;
11176     stackSlotL tmpS;
11177     regD tmpD;
    convI2L_reg(tmpL, src);              // Sign-extend int to long.
11179     regL_to_stkL(tmpS, tmpL);            // Store long to stack.
11180     moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
11181     convL2DRaw_regD(dst, tmpD);          // Convert to double.
11182   %}
11183 %}
11184 
11185 // Integer to Double conversion. Special version for Power8.
11186 instruct convI2D_reg_mtfprd_Ex(regD dst, iRegIsrc src) %{
11187   match(Set dst (ConvI2D src));
11188   predicate(VM_Version::has_mtfprd());
11189   ins_cost(DEFAULT_COST);
11190 
11191   expand %{
11192     regD tmpD;
11193     moveI2D_reg(tmpD, src);
11194     convL2DRaw_regD(dst, tmpD);          // Convert to double.
11195   %}
11196 %}
11197 
11198 // Long to Double conversion
11199 instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
11200   match(Set dst (ConvL2D src));
11201   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
11202 
11203   expand %{
11204     regD tmpD;
11205     moveL2D_stack_reg(tmpD, src);
11206     convL2DRaw_regD(dst, tmpD);
11207   %}
11208 %}
11209 
11210 // Long to Double conversion. Special version for Power8.
11211 instruct convL2D_reg_mtfprd_Ex(regD dst, iRegLsrc src) %{
11212   match(Set dst (ConvL2D src));
11213   predicate(VM_Version::has_mtfprd());
11214   ins_cost(DEFAULT_COST);
11215 
11216   expand %{
11217     regD tmpD;
11218     moveL2D_reg(tmpD, src);
11219     convL2DRaw_regD(dst, tmpD);          // Convert to double.
11220   %}
11221 %}
11222 
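// ConvF2D needs no conversion instruction: PPC floating point registers hold
// single-precision values in double format, so a register move (if any) suffices.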
11223 instruct convF2D_reg(regD dst, regF src) %{
11224   match(Set dst (ConvF2D src));
11225   format %{ "FMR     $dst, $src \t// float->double" %}
11226   // variable size, 0 or 4
11227   ins_encode %{
11228     __ fmr_if_needed($dst$$FloatRegister, $src$$FloatRegister);
11229   %}
11230   ins_pipe(pipe_class_default);
11231 %}
11232 
11233 //----------Control Flow Instructions------------------------------------------
11234 // Compare Instructions
11235 
11236 // Compare Integers
11237 instruct cmpI_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
11238   match(Set crx (CmpI src1 src2));
11239   size(4);
11240   format %{ "CMPW    $crx, $src1, $src2" %}
11241   ins_encode %{
11242     __ cmpw($crx$$CondRegister, $src1$$Register, $src2$$Register);
11243   %}
11244   ins_pipe(pipe_class_compare);
11245 %}
11246 
11247 instruct cmpI_reg_imm16(flagsReg crx, iRegIsrc src1, immI16 src2) %{
11248   match(Set crx (CmpI src1 src2));
11249   format %{ "CMPWI   $crx, $src1, $src2" %}
11250   size(4);
11251   ins_encode %{
11252     __ cmpwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
11253   %}
11254   ins_pipe(pipe_class_compare);
11255 %}
11256 
11257 // (src1 & src2) == 0?
11258 instruct testI_reg_imm(flagsRegCR0 cr0, iRegIsrc src1, uimmI16 src2, immI_0 zero) %{
11259   match(Set cr0 (CmpI (AndI src1 src2) zero));
11260   // r0 is killed
11261   format %{ "ANDI    R0, $src1, $src2 \t// BTST int" %}
11262   size(4);
11263   ins_encode %{
11264     __ andi_(R0, $src1$$Register, $src2$$constant);
11265   %}
11266   ins_pipe(pipe_class_compare);
11267 %}
11268 
11269 instruct cmpL_reg_reg(flagsReg crx, iRegLsrc src1, iRegLsrc src2) %{
11270   match(Set crx (CmpL src1 src2));
11271   format %{ "CMPD    $crx, $src1, $src2" %}
11272   size(4);
11273   ins_encode %{
11274     __ cmpd($crx$$CondRegister, $src1$$Register, $src2$$Register);
11275   %}
11276   ins_pipe(pipe_class_compare);
11277 %}
11278 
11279 instruct cmpL_reg_imm16(flagsReg crx, iRegLsrc src1, immL16 src2) %{
11280   match(Set crx (CmpL src1 src2));
11281   format %{ "CMPDI   $crx, $src1, $src2" %}
11282   size(4);
11283   ins_encode %{
11284     __ cmpdi($crx$$CondRegister, $src1$$Register, $src2$$constant);
11285   %}
11286   ins_pipe(pipe_class_compare);
11287 %}
11288 
11289 // Added CmpUL for LoopPredicate.
11290 instruct cmpUL_reg_reg(flagsReg crx, iRegLsrc src1, iRegLsrc src2) %{
11291   match(Set crx (CmpUL src1 src2));
11292   format %{ "CMPLD   $crx, $src1, $src2" %}
11293   size(4);
11294   ins_encode %{
11295     __ cmpld($crx$$CondRegister, $src1$$Register, $src2$$Register);
11296   %}
11297   ins_pipe(pipe_class_compare);
11298 %}
11299 
11300 instruct cmpUL_reg_imm16(flagsReg crx, iRegLsrc src1, uimmL16 src2) %{
11301   match(Set crx (CmpUL src1 src2));
11302   format %{ "CMPLDI  $crx, $src1, $src2" %}
11303   size(4);
11304   ins_encode %{
11305     __ cmpldi($crx$$CondRegister, $src1$$Register, $src2$$constant);
11306   %}
11307   ins_pipe(pipe_class_compare);
11308 %}
11309 
11310 instruct testL_reg_reg(flagsRegCR0 cr0, iRegLsrc src1, iRegLsrc src2, immL_0 zero) %{
11311   match(Set cr0 (CmpL (AndL src1 src2) zero));
11312   // r0 is killed
11313   format %{ "AND     R0, $src1, $src2 \t// BTST long" %}
11314   size(4);
11315   ins_encode %{
11316     __ and_(R0, $src1$$Register, $src2$$Register);
11317   %}
11318   ins_pipe(pipe_class_compare);
11319 %}
11320 
11321 instruct testL_reg_imm(flagsRegCR0 cr0, iRegLsrc src1, uimmL16 src2, immL_0 zero) %{
11322   match(Set cr0 (CmpL (AndL src1 src2) zero));
11323   // r0 is killed
11324   format %{ "ANDI    R0, $src1, $src2 \t// BTST long" %}
11325   size(4);
11326   ins_encode %{
11327     __ andi_(R0, $src1$$Register, $src2$$constant);
11328   %}
11329   ins_pipe(pipe_class_compare);
11330 %}
11331 
11332 // Manifest a CmpL3 result in an integer register.
11333 instruct cmpL3_reg_reg(iRegIdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
11334   match(Set dst (CmpL3 src1 src2));
11335   effect(KILL cr0);
11336   ins_cost(DEFAULT_COST * 5);
11337   size((VM_Version::has_brw() ? 16 : 20));
11338 
11339   format %{ "cmpL3_reg_reg $dst, $src1, $src2" %}
11340 
11341   ins_encode %{
11342     __ cmpd(CCR0, $src1$$Register, $src2$$Register);
11343     __ set_cmp3($dst$$Register);
11344   %}
11345   ins_pipe(pipe_class_default);
11346 %}
11347 
11348 // Implicit range checks.
// A range check in the ideal graph has one of the following shapes:
11350 //  - (If le (CmpU length index)), (IfTrue  throw exception)
11351 //  - (If lt (CmpU index length)), (IfFalse throw exception)
11352 //
11353 // Match range check 'If le (CmpU length index)'.
11354 instruct rangeCheck_iReg_uimm15(cmpOp cmp, iRegIsrc src_length, uimmI15 index, label labl) %{
11355   match(If cmp (CmpU src_length index));
11356   effect(USE labl);
11357   predicate(TrapBasedRangeChecks &&
11358             _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le &&
11359             PROB_UNLIKELY(_leaf->as_If()->_prob) >= PROB_ALWAYS &&
11360             (Matcher::branches_to_uncommon_trap(_leaf)));
11361 
11362   ins_is_TrapBasedCheckNode(true);
11363 
11364   format %{ "TWI     $index $cmp $src_length \t// RangeCheck => trap $labl" %}
11365   size(4);
11366   ins_encode %{
11367     if ($cmp$$cmpcode == 0x1 /* less_equal */) {
11368       __ trap_range_check_le($src_length$$Register, $index$$constant);
11369     } else {
11370       // Both successors are uncommon traps, probability is 0.
11371       // Node got flipped during fixup flow.
11372       assert($cmp$$cmpcode == 0x9, "must be greater");
11373       __ trap_range_check_g($src_length$$Register, $index$$constant);
11374     }
11375   %}
11376   ins_pipe(pipe_class_trap);
11377 %}
11378 
11379 // Match range check 'If lt (CmpU index length)'.
11380 instruct rangeCheck_iReg_iReg(cmpOp cmp, iRegIsrc src_index, iRegIsrc src_length, label labl) %{
11381   match(If cmp (CmpU src_index src_length));
11382   effect(USE labl);
11383   predicate(TrapBasedRangeChecks &&
11384             _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt &&
11385             _leaf->as_If()->_prob >= PROB_ALWAYS &&
11386             (Matcher::branches_to_uncommon_trap(_leaf)));
11387 
11388   ins_is_TrapBasedCheckNode(true);
11389 
11390   format %{ "TW      $src_index $cmp $src_length \t// RangeCheck => trap $labl" %}
11391   size(4);
11392   ins_encode %{
11393     if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
11394       __ trap_range_check_ge($src_index$$Register, $src_length$$Register);
11395     } else {
11396       // Both successors are uncommon traps, probability is 0.
11397       // Node got flipped during fixup flow.
11398       assert($cmp$$cmpcode == 0x8, "must be less");
11399       __ trap_range_check_l($src_index$$Register, $src_length$$Register);
11400     }
11401   %}
11402   ins_pipe(pipe_class_trap);
11403 %}
11404 
11405 // Match range check 'If lt (CmpU index length)'.
11406 instruct rangeCheck_uimm15_iReg(cmpOp cmp, iRegIsrc src_index, uimmI15 length, label labl) %{
11407   match(If cmp (CmpU src_index length));
11408   effect(USE labl);
11409   predicate(TrapBasedRangeChecks &&
11410             _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt &&
11411             _leaf->as_If()->_prob >= PROB_ALWAYS &&
11412             (Matcher::branches_to_uncommon_trap(_leaf)));
11413 
11414   ins_is_TrapBasedCheckNode(true);
11415 
11416   format %{ "TWI     $src_index $cmp $length \t// RangeCheck => trap $labl" %}
11417   size(4);
11418   ins_encode %{
11419     if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
11420       __ trap_range_check_ge($src_index$$Register, $length$$constant);
11421     } else {
11422       // Both successors are uncommon traps, probability is 0.
11423       // Node got flipped during fixup flow.
11424       assert($cmp$$cmpcode == 0x8, "must be less");
11425       __ trap_range_check_l($src_index$$Register, $length$$constant);
11426     }
11427   %}
11428   ins_pipe(pipe_class_trap);
11429 %}
11430 
11431 instruct compU_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
11432   match(Set crx (CmpU src1 src2));
11433   format %{ "CMPLW   $crx, $src1, $src2 \t// unsigned" %}
11434   size(4);
11435   ins_encode %{
11436     __ cmplw($crx$$CondRegister, $src1$$Register, $src2$$Register);
11437   %}
11438   ins_pipe(pipe_class_compare);
11439 %}
11440 
11441 instruct compU_reg_uimm16(flagsReg crx, iRegIsrc src1, uimmI16 src2) %{
11442   match(Set crx (CmpU src1 src2));
11443   size(4);
11444   format %{ "CMPLWI  $crx, $src1, $src2" %}
11445   ins_encode %{
11446     __ cmplwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
11447   %}
11448   ins_pipe(pipe_class_compare);
11449 %}
11450 
11451 // Implicit zero checks (more implicit null checks).
11452 // No constant pool entries required.
11453 instruct zeroCheckN_iReg_imm0(cmpOp cmp, iRegNsrc value, immN_0 zero, label labl) %{
11454   match(If cmp (CmpN value zero));
11455   effect(USE labl);
11456   predicate(TrapBasedNullChecks &&
11457             _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
11458             _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
11459             Matcher::branches_to_uncommon_trap(_leaf));
11460   ins_cost(1);
11461 
11462   ins_is_TrapBasedCheckNode(true);
11463 
11464   format %{ "TDI     $value $cmp $zero \t// ZeroCheckN => trap $labl" %}
11465   size(4);
11466   ins_encode %{
11467     if ($cmp$$cmpcode == 0xA) {
11468       __ trap_null_check($value$$Register);
11469     } else {
11470       // Both successors are uncommon traps, probability is 0.
11471       // Node got flipped during fixup flow.
11472       assert($cmp$$cmpcode == 0x2 , "must be equal(0xA) or notEqual(0x2)");
11473       __ trap_null_check($value$$Register, Assembler::traptoGreaterThanUnsigned);
11474     }
11475   %}
11476   ins_pipe(pipe_class_trap);
11477 %}
11478 
11479 // Compare narrow oops.
11480 instruct cmpN_reg_reg(flagsReg crx, iRegNsrc src1, iRegNsrc src2) %{
11481   match(Set crx (CmpN src1 src2));
11482 
11483   size(4);
11484   ins_cost(2);
11485   format %{ "CMPLW   $crx, $src1, $src2 \t// compressed ptr" %}
11486   ins_encode %{
11487     __ cmplw($crx$$CondRegister, $src1$$Register, $src2$$Register);
11488   %}
11489   ins_pipe(pipe_class_compare);
11490 %}
11491 
11492 instruct cmpN_reg_imm0(flagsReg crx, iRegNsrc src1, immN_0 src2) %{
11493   match(Set crx (CmpN src1 src2));
11494   // Make this more expensive than zeroCheckN_iReg_imm0.
11495   ins_cost(2);
11496 
11497   format %{ "CMPLWI  $crx, $src1, $src2 \t// compressed ptr" %}
11498   size(4);
11499   ins_encode %{
11500     __ cmplwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
11501   %}
11502   ins_pipe(pipe_class_compare);
11503 %}
11504 
11505 // Implicit zero checks (more implicit null checks).
11506 // No constant pool entries required.
11507 instruct zeroCheckP_reg_imm0(cmpOp cmp, iRegP_N2P value, immP_0 zero, label labl) %{
11508   match(If cmp (CmpP value zero));
11509   effect(USE labl);
11510   predicate(TrapBasedNullChecks &&
11511             _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
11512             _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
11513             Matcher::branches_to_uncommon_trap(_leaf));
11514   ins_cost(1); // Should not be cheaper than zeroCheckN.
11515 
11516   ins_is_TrapBasedCheckNode(true);
11517 
11518   format %{ "TDI     $value $cmp $zero \t// ZeroCheckP => trap $labl" %}
11519   size(4);
11520   ins_encode %{
11521     if ($cmp$$cmpcode == 0xA) {
11522       __ trap_null_check($value$$Register);
11523     } else {
11524       // Both successors are uncommon traps, probability is 0.
11525       // Node got flipped during fixup flow.
11526       assert($cmp$$cmpcode == 0x2 , "must be equal(0xA) or notEqual(0x2)");
11527       __ trap_null_check($value$$Register, Assembler::traptoGreaterThanUnsigned);
11528     }
11529   %}
11530   ins_pipe(pipe_class_trap);
11531 %}
11532 
11533 // Compare Pointers
11534 instruct cmpP_reg_reg(flagsReg crx, iRegP_N2P src1, iRegP_N2P src2) %{
11535   match(Set crx (CmpP src1 src2));
11536   format %{ "CMPLD   $crx, $src1, $src2 \t// ptr" %}
11537   size(4);
11538   ins_encode %{
11539     __ cmpld($crx$$CondRegister, $src1$$Register, $src2$$Register);
11540   %}
11541   ins_pipe(pipe_class_compare);
11542 %}
11543 
11544 instruct cmpP_reg_null(flagsReg crx, iRegP_N2P src1, immP_0or1 src2) %{
11545   match(Set crx (CmpP src1 src2));
11546   format %{ "CMPLDI   $crx, $src1, $src2 \t// ptr" %}
11547   size(4);
11548   ins_encode %{
11549     __ cmpldi($crx$$CondRegister, $src1$$Register, (int)((short)($src2$$constant & 0xFFFF)));
11550   %}
11551   ins_pipe(pipe_class_compare);
11552 %}
11553 
11554 // Used in postalloc expand.
11555 instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
  // This match rule prevents reordering of the node before a safepoint.
  // It only makes sense if this instruction is used exclusively
  // for the expansion of EncodeP!
11559   match(Set crx (CmpP src1 src2));
11560   predicate(false);
11561 
11562   format %{ "CMPDI   $crx, $src1, $src2" %}
11563   size(4);
11564   ins_encode %{
11565     __ cmpdi($crx$$CondRegister, $src1$$Register, $src2$$constant);
11566   %}
11567   ins_pipe(pipe_class_compare);
11568 %}
11569 
11570 //----------Float Compares----------------------------------------------------
11571 
11572 instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
11573   // Needs matchrule, see cmpDUnordered.
11574   match(Set crx (CmpF src1 src2));
  // False predicate, shall not be matched.
11576   predicate(false);
11577 
11578   format %{ "cmpFUrd $crx, $src1, $src2" %}
11579   size(4);
11580   ins_encode %{
11581     __ fcmpu($crx$$CondRegister, $src1$$FloatRegister, $src2$$FloatRegister);
11582   %}
11583   ins_pipe(pipe_class_default);
11584 %}
11585 
11586 instruct cmov_bns_less(flagsReg crx) %{
11587   // no match-rule, false predicate
11588   effect(DEF crx);
11589   predicate(false);
11590 
11591   ins_variable_size_depending_on_alignment(true);
11592 
11593   format %{ "cmov    $crx" %}
11594   // Worst case is branch + move + stop, no stop without scheduler.
11595   size(12);
11596   ins_encode %{
11597     Label done;
11598     __ bns($crx$$CondRegister, done);        // not unordered -> keep crx
11599     __ li(R0, 0);
11600     __ cmpwi($crx$$CondRegister, R0, 1);     // unordered -> set crx to 'less'
11601     __ bind(done);
11602   %}
11603   ins_pipe(pipe_class_default);
11604 %}
11605 
11606 // Compare floating, generate condition code.
11607 instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
  // FIXME: should we match '(If cmp (CmpF src1 src2))' ??
11609   //
11610   // The following code sequence occurs a lot in mpegaudio:
11611   //
11612   // block BXX:
11613   // 0: instruct cmpFUnordered_reg_reg (cmpF_reg_reg-0):
11614   //    cmpFUrd CCR6, F11, F9
11615   // 4: instruct cmov_bns_less (cmpF_reg_reg-1):
11616   //    cmov CCR6
11617   // 8: instruct branchConSched:
11618   //    B_FARle CCR6, B56  P=0.500000 C=-1.000000
11619   match(Set crx (CmpF src1 src2));
11620   ins_cost(DEFAULT_COST+BRANCH_COST);
11621 
11622   format %{ "CmpF    $crx, $src1, $src2 \t// postalloc expanded" %}
11623   postalloc_expand %{
11624     //
11625     // replaces
11626     //
11627     //   region  src1  src2
11628     //    \       |     |
11629     //     crx=cmpF_reg_reg
11630     //
11631     // with
11632     //
11633     //   region  src1  src2
11634     //    \       |     |
11635     //     crx=cmpFUnordered_reg_reg
11636     //      |
11637     //      ^  region
11638     //      |   \
11639     //      crx=cmov_bns_less
11640     //
11641 
11642     // Create new nodes.
11643     MachNode *m1 = new cmpFUnordered_reg_regNode();
11644     MachNode *m2 = new cmov_bns_lessNode();
11645 
11646     // inputs for new nodes
11647     m1->add_req(n_region, n_src1, n_src2);
11648     m2->add_req(n_region);
11649     m2->add_prec(m1);
11650 
11651     // operands for new nodes
11652     m1->_opnds[0] = op_crx;
11653     m1->_opnds[1] = op_src1;
11654     m1->_opnds[2] = op_src2;
11655     m2->_opnds[0] = op_crx;
11656 
11657     // registers for new nodes
11658     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
11659     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
11660 
11661     // Insert new nodes.
11662     nodes->push(m1);
11663     nodes->push(m2);
11664   %}
11665 %}
11666 
11667 // Compare float, generate -1,0,1
11668 instruct cmpF3_reg_reg(iRegIdst dst, regF src1, regF src2, flagsRegCR0 cr0) %{
11669   match(Set dst (CmpF3 src1 src2));
11670   effect(KILL cr0);
11671   ins_cost(DEFAULT_COST * 6);
11672   size((VM_Version::has_brw() ? 20 : 24));
11673 
11674   format %{ "cmpF3_reg_reg $dst, $src1, $src2" %}
11675 
11676   ins_encode %{
11677     __ fcmpu(CCR0, $src1$$FloatRegister, $src2$$FloatRegister);
11678     __ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
11679   %}
11680   ins_pipe(pipe_class_default);
11681 %}
11682 
11683 instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
  // Needs a match rule so that the ideal opcode is Cmp. This ensures that GCM places the
  // node right before the conditional move that uses it.
  // In the jck test api/java_awt/geom/QuadCurve2DFloat/index.html#SetCurveTesttestCase7,
  // compilation of java.awt.geom.RectangularShape::getBounds()Ljava/awt/Rectangle
  // crashed in register allocation because the flags register between cmpDUnordered and a
  // conditional move was supposed to be spilled.
11690   match(Set crx (CmpD src1 src2));
11691   // False predicate, shall not be matched.
11692   predicate(false);
11693 
11694   format %{ "cmpFUrd $crx, $src1, $src2" %}
11695   size(4);
11696   ins_encode %{
11697     __ fcmpu($crx$$CondRegister, $src1$$FloatRegister, $src2$$FloatRegister);
11698   %}
11699   ins_pipe(pipe_class_default);
11700 %}
11701 
11702 instruct cmpD_reg_reg_Ex(flagsReg crx, regD src1, regD src2) %{
11703   match(Set crx (CmpD src1 src2));
11704   ins_cost(DEFAULT_COST+BRANCH_COST);
11705 
11706   format %{ "CmpD    $crx, $src1, $src2 \t// postalloc expanded" %}
11707   postalloc_expand %{
11708     //
11709     // replaces
11710     //
11711     //   region  src1  src2
11712     //    \       |     |
11713     //     crx=cmpD_reg_reg
11714     //
11715     // with
11716     //
11717     //   region  src1  src2
11718     //    \       |     |
11719     //     crx=cmpDUnordered_reg_reg
11720     //      |
11721     //      ^  region
11722     //      |   \
11723     //      crx=cmov_bns_less
11724     //
11725 
11726     // create new nodes
11727     MachNode *m1 = new cmpDUnordered_reg_regNode();
11728     MachNode *m2 = new cmov_bns_lessNode();
11729 
11730     // inputs for new nodes
11731     m1->add_req(n_region, n_src1, n_src2);
11732     m2->add_req(n_region);
11733     m2->add_prec(m1);
11734 
11735     // operands for new nodes
11736     m1->_opnds[0] = op_crx;
11737     m1->_opnds[1] = op_src1;
11738     m1->_opnds[2] = op_src2;
11739     m2->_opnds[0] = op_crx;
11740 
11741     // registers for new nodes
11742     ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
11743     ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
11744 
11745     // Insert new nodes.
11746     nodes->push(m1);
11747     nodes->push(m2);
11748   %}
11749 %}
11750 
11751 // Compare double, generate -1,0,1
11752 instruct cmpD3_reg_reg(iRegIdst dst, regD src1, regD src2, flagsRegCR0 cr0) %{
11753   match(Set dst (CmpD3 src1 src2));
11754   effect(KILL cr0);
11755   ins_cost(DEFAULT_COST * 6);
11756   size((VM_Version::has_brw() ? 20 : 24));
11757 
11758   format %{ "cmpD3_reg_reg $dst, $src1, $src2" %}
11759 
11760   ins_encode %{
11761     __ fcmpu(CCR0, $src1$$FloatRegister, $src2$$FloatRegister);
11762     __ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
11763   %}
11764   ins_pipe(pipe_class_default);
11765 %}
11766 
// Compare char (character class checks: Digit, LowerCase, UpperCase, Whitespace)
11768 instruct cmprb_Digit_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
11769   match(Set dst (Digit src1));
11770   effect(TEMP src2, TEMP crx);
11771   ins_cost(3 * DEFAULT_COST);
11772 
11773   format %{ "LI      $src2, 0x3930\n\t"
11774             "CMPRB   $crx, 0, $src1, $src2\n\t"
11775             "SETB    $dst, $crx" %}
11776   size(12);
11777   ins_encode %{
11778     // 0x30: 0, 0x39: 9
11779     __ li($src2$$Register, 0x3930);
11780     // compare src1 with ranges 0x30 to 0x39
11781     __ cmprb($crx$$CondRegister, 0, $src1$$Register, $src2$$Register);
11782     __ setb($dst$$Register, $crx$$CondRegister);
11783   %}
11784   ins_pipe(pipe_class_default);
11785 %}
11786 
11787 instruct cmprb_LowerCase_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
11788   match(Set dst (LowerCase src1));
11789   effect(TEMP src2, TEMP crx);
11790   ins_cost(12 * DEFAULT_COST);
11791 
11792   format %{ "LI      $src2, 0x7A61\n\t"
11793             "CMPRB   $crx, 0, $src1, $src2\n\t"
11794             "BGT     $crx, done\n\t"
11795             "LIS     $src2, (signed short)0xF6DF\n\t"
11796             "ORI     $src2, $src2, 0xFFF8\n\t"
11797             "CMPRB   $crx, 1, $src1, $src2\n\t"
11798             "BGT     $crx, done\n\t"
11799             "LIS     $src2, (signed short)0xAAB5\n\t"
11800             "ORI     $src2, $src2, 0xBABA\n\t"
11801             "INSRDI  $src2, $src2, 32, 0\n\t"
11802             "CMPEQB  $crx, 1, $src1, $src2\n"
11803             "done:\n\t"
11804             "SETB    $dst, $crx" %}
11805 
11806   size(48);
11807   ins_encode %{
11808     Label done;
11809     // 0x61: a, 0x7A: z
11810     __ li($src2$$Register, 0x7A61);
11811     // compare src1 with ranges 0x61 to 0x7A
11812     __ cmprb($crx$$CondRegister, 0, $src1$$Register, $src2$$Register);
11813     __ bgt($crx$$CondRegister, done);
11814 
11815     // 0xDF: sharp s, 0xFF: y with diaeresis; 0xF7 (division sign) is not a lowercase letter
11816     __ lis($src2$$Register, (signed short)0xF6DF);
11817     __ ori($src2$$Register, $src2$$Register, 0xFFF8);
11818     // compare src1 with ranges 0xDF to 0xF6 and 0xF8 to 0xFF
11819     __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
11820     __ bgt($crx$$CondRegister, done);
11821 
11822     // 0xAA: feminine ordinal indicator
11823     // 0xB5: micro sign
11824     // 0xBA: masculine ordinal indicator
11825     __ lis($src2$$Register, (signed short)0xAAB5);
11826     __ ori($src2$$Register, $src2$$Register, 0xBABA);
11827     __ insrdi($src2$$Register, $src2$$Register, 32, 0);
11828     // compare src1 with 0xAA, 0xB5, and 0xBA
11829     __ cmpeqb($crx$$CondRegister, $src1$$Register, $src2$$Register);
11830 
11831     __ bind(done);
11832     __ setb($dst$$Register, $crx$$CondRegister);
11833   %}
11834   ins_pipe(pipe_class_default);
11835 %}
11836 
11837 instruct cmprb_UpperCase_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
11838   match(Set dst (UpperCase src1));
11839   effect(TEMP src2, TEMP crx);
11840   ins_cost(7 * DEFAULT_COST);
11841 
11842   format %{ "LI      $src2, 0x5A41\n\t"
11843             "CMPRB   $crx, 0, $src1, $src2\n\t"
11844             "BGT     $crx, done\n\t"
11845             "LIS     $src2, (signed short)0xD6C0\n\t"
11846             "ORI     $src2, $src2, 0xDED8\n\t"
11847             "CMPRB   $crx, 1, $src1, $src2\n"
11848             "done:\n\t"
11849             "SETB    $dst, $crx" %}
11850 
11851   size(28);
11852   ins_encode %{
11853     Label done;
11854     // 0x41: A, 0x5A: Z
11855     __ li($src2$$Register, 0x5A41);
11856     // compare src1 with a range 0x41 to 0x5A
11857     __ cmprb($crx$$CondRegister, 0, $src1$$Register, $src2$$Register);
11858     __ bgt($crx$$CondRegister, done);
11859 
11860     // 0xC0: A with grave, 0xDE: capital thorn; 0xD7 (multiplication sign) is not an uppercase letter
11861     __ lis($src2$$Register, (signed short)0xD6C0);
11862     __ ori($src2$$Register, $src2$$Register, 0xDED8);
11863     // compare src1 with ranges 0xC0 to 0xD6 and 0xD8 to 0xDE
11864     __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
11865 
11866     __ bind(done);
11867     __ setb($dst$$Register, $crx$$CondRegister);
11868   %}
11869   ins_pipe(pipe_class_default);
11870 %}
11871 
11872 instruct cmprb_Whitespace_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
11873   match(Set dst (Whitespace src1));
11874   predicate(PowerArchitecturePPC64 <= 9);
11875   effect(TEMP src2, TEMP crx);
11876   ins_cost(4 * DEFAULT_COST);
11877 
11878   format %{ "LI      $src2, 0x0D09\n\t"
11879             "ADDIS   $src2, $src2, 0x201C\n\t"
11880             "CMPRB   $crx, 1, $src1, $src2\n\t"
11881             "SETB    $dst, $crx" %}
11882   size(16);
11883   ins_encode %{
11884     // 0x09 to 0x0D, 0x1C to 0x20
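    // li + addis build the range constant 0x201C0D09 in $src2, the same
    // constant the Power10 variant below loads with a single pli.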
11885     __ li($src2$$Register, 0x0D09);
11886     __ addis($src2$$Register, $src2$$Register, 0x0201C);
11887     // compare src with ranges 0x09 to 0x0D and 0x1C to 0x20
11888     __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
11889     __ setb($dst$$Register, $crx$$CondRegister);
11890   %}
11891   ins_pipe(pipe_class_default);
11892 %}
11893 
11894 // Power 10 version, using prefixed addi to load 32-bit constant
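// A prefixed instruction must not cross a 64-byte boundary, hence the
// alignment requirement (ins_alignment) and the pc assert in the encoding.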
11895 instruct cmprb_Whitespace_reg_reg_prefixed(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
11896   match(Set dst (Whitespace src1));
11897   predicate(PowerArchitecturePPC64 >= 10);
11898   effect(TEMP src2, TEMP crx);
11899   ins_cost(3 * DEFAULT_COST);
11900 
11901   format %{ "PLI     $src2, 0x201C0D09\n\t"
11902             "CMPRB   $crx, 1, $src1, $src2\n\t"
11903             "SETB    $dst, $crx" %}
11904   size(16);
11905   ins_encode %{
11906     // 0x09 to 0x0D, 0x1C to 0x20
11907     assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
11908     __ pli($src2$$Register, 0x201C0D09);
11909     // compare src with ranges 0x09 to 0x0D and 0x1C to 0x20
11910     __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
11911     __ setb($dst$$Register, $crx$$CondRegister);
11912   %}
11913   ins_pipe(pipe_class_default);
11914   ins_alignment(2);
11915 %}
11916 
11917 //----------Branches---------------------------------------------------------
11918 // Jump
11919 
11920 // Direct Branch.
11921 instruct branch(label labl) %{
11922   match(Goto);
11923   effect(USE labl);
11924   ins_cost(BRANCH_COST);
11925 
11926   format %{ "B       $labl" %}
11927   size(4);
11928   ins_encode %{
11929      Label d;    // dummy
11930      __ bind(d);
11931      Label* p = $labl$$label;
11932      // `p' is `nullptr' when this encoding class is used only to
11933      // determine the size of the encoded instruction.
11934      Label& l = (nullptr == p)? d : *(p);
11935      __ b(l);
11936   %}
11937   ins_pipe(pipe_class_default);
11938 %}
11939 
11940 // Conditional Near Branch
11941 instruct branchCon(cmpOp cmp, flagsRegSrc crx, label lbl) %{
11942   // Same match rule as `branchConFar'.
11943   match(If cmp crx);
11944   effect(USE lbl);
11945   ins_cost(BRANCH_COST);
11946 
11947   // If set to 1 this indicates that the current instruction is a
11948   // short variant of a long branch. This avoids using this
11949   // instruction in first-pass matching. It will then only be used in
11950   // the `Shorten_branches' pass.
11951   ins_short_branch(1);
11952 
11953   format %{ "B$cmp     $crx, $lbl" %}
11954   size(4);
11955   ins_encode( enc_bc(crx, cmp, lbl) );
11956   ins_pipe(pipe_class_default);
11957 %}
11958 
11959 // This is for cases when the ppc64 `bc' instruction does not
11960 // reach far enough. So we emit a far branch here, which is more
11961 // expensive.
11962 //
11963 // Conditional Far Branch
11964 instruct branchConFar(cmpOp cmp, flagsRegSrc crx, label lbl) %{
11965   // Same match rule as `branchCon'.
11966   match(If cmp crx);
11967   effect(USE crx, USE lbl);
11968   // Higher cost than `branchCon'.
11969   ins_cost(5*BRANCH_COST);
11970 
11971   // This is not a short variant of a branch, but the long variant.
11972   ins_short_branch(0);
11973 
11974   format %{ "B_FAR$cmp $crx, $lbl" %}
11975   size(8);
11976   ins_encode( enc_bc_far(crx, cmp, lbl) );
11977   ins_pipe(pipe_class_default);
11978 %}
11979 
11980 instruct branchLoopEnd(cmpOp cmp, flagsRegSrc crx, label labl) %{
11981   match(CountedLoopEnd cmp crx);
11982   effect(USE labl);
11983   ins_cost(BRANCH_COST);
11984 
11985   // short variant.
11986   ins_short_branch(1);
11987 
11988   format %{ "B$cmp     $crx, $labl \t// counted loop end" %}
11989   size(4);
11990   ins_encode( enc_bc(crx, cmp, labl) );
11991   ins_pipe(pipe_class_default);
11992 %}
11993 
11994 instruct branchLoopEndFar(cmpOp cmp, flagsRegSrc crx, label labl) %{
11995   match(CountedLoopEnd cmp crx);
11996   effect(USE labl);
11997   ins_cost(BRANCH_COST);
11998 
11999   // Long variant.
12000   ins_short_branch(0);
12001 
12002   format %{ "B_FAR$cmp $crx, $labl \t// counted loop end" %}
12003   size(8);
12004   ins_encode( enc_bc_far(crx, cmp, labl) );
12005   ins_pipe(pipe_class_default);
12006 %}
12007 
12008 // ============================================================================
12009 // Java runtime operations, intrinsics and other complex operations.
12010 
12011 // The second, slow half of a subtype check. Scans the subklass's secondary
12012 // supers array for an instance of the superklass. Sets a hidden internal cache
12013 // on a hit (the cache is checked with exposed code in gen_subtype_check()).
12014 // Returns non-zero for a miss, zero for a hit. The encoding ALSO sets flags.
12015 //
12016 // GL TODO: Improve this.
12017 // - result should not be a TEMP
12018 // - Add match rule as on sparc avoiding additional Cmp.
12019 instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
12020                              iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
12021   match(Set result (PartialSubtypeCheck subklass superklass));
12022   effect(TEMP_DEF result, TEMP tmp_klass, TEMP tmp_arrayptr);
12023   ins_cost(DEFAULT_COST*10);
12024 
12025   format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
12026   ins_encode %{
12027     __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register,
12028                                      $tmp_klass$$Register, nullptr, $result$$Register);
12029   %}
12030   ins_pipe(pipe_class_default);
12031 %}
12032 
12033 instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP super_con, rarg6RegP result,
12034                                        rarg1RegP tempR1, rarg5RegP tempR2, rarg4RegP tempR3, rscratch1RegP tempR4,
12035                                        flagsRegCR0 cr0, regCTR ctr)
12036 %{
12037   match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
12038   predicate(UseSecondarySupersTable);
12039   effect(KILL cr0, KILL ctr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP tempR4);
12040 
12041   ins_cost(DEFAULT_COST*8);  // smaller than the other version
12042   format %{ "partialSubtypeCheck $result, $sub, $super_reg" %}
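  // With a constant superclass the hashed secondary supers table can be used:
  // either inlined (InlineSecondarySupersTest) or by calling the per-slot
  // lookup stub through CTR, as selected in the encoding below.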
12043 
12044   ins_encode %{
12045     u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
12046     if (InlineSecondarySupersTest) {
12047       __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register,
12048                                        $tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
12049                                        $result$$Register, super_klass_slot);
12050     } else {
12051       address stub = StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot);
12052       Register r_stub_addr = $tempR1$$Register;
12053       __ add_const_optimized(r_stub_addr, R29_TOC, MacroAssembler::offset_to_global_toc(stub), R0);
12054       __ mtctr(r_stub_addr);
12055       __ bctrl();
12056     }
12057   %}
12058 
12059   ins_pipe(pipe_class_memory);
12060 %}
12061 
12062 // inlined locking and unlocking
12063 
12064 instruct cmpFastLock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
12065   predicate(LockingMode != LM_LIGHTWEIGHT);
12066   match(Set crx (FastLock oop box));
12067   effect(TEMP tmp1, TEMP tmp2);
12068 
12069   format %{ "FASTLOCK  $oop, $box, $tmp1, $tmp2" %}
12070   ins_encode %{
12071     __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
12072                                  $tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
12073     // If locking was successful, crx should indicate 'EQ'.
12074     // The compiler generates a branch to the runtime call to
12075     // _complete_monitor_locking_Java for the case where crx is 'NE'.
12076   %}
12077   ins_pipe(pipe_class_compare);
12078 %}
12079 
12080 instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
12081   predicate(LockingMode != LM_LIGHTWEIGHT);
12082   match(Set crx (FastUnlock oop box));
12083   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
12084 
12085   format %{ "FASTUNLOCK  $oop, $box, $tmp1, $tmp2" %}
12086   ins_encode %{
12087     __ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
12088                                    $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
12089     // If unlocking was successful, crx should indicate 'EQ'.
12090     // The compiler generates a branch to the runtime call to
12091     // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
12092   %}
12093   ins_pipe(pipe_class_compare);
12094 %}
12095 
12096 instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR1 cr1) %{
12097   predicate(LockingMode == LM_LIGHTWEIGHT);
12098   match(Set crx (FastLock oop box));
12099   effect(TEMP tmp1, TEMP tmp2, KILL cr1);
12100 
12101   format %{ "FASTLOCK  $oop, $box, $tmp1, $tmp2" %}
12102   ins_encode %{
12103     __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
12104                              $tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
12105     // If locking was successful, crx should indicate 'EQ'.
12106     // The compiler generates a branch to the runtime call to
12107     // _complete_monitor_locking_Java for the case where crx is 'NE'.
12108   %}
12109   ins_pipe(pipe_class_compare);
12110 %}
12111 
12112 instruct cmpFastUnlockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
12113   predicate(LockingMode == LM_LIGHTWEIGHT);
12114   match(Set crx (FastUnlock oop box));
12115   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
12116 
12117   format %{ "FASTUNLOCK  $oop, $box, $tmp1, $tmp2" %}
12118   ins_encode %{
12119     __ fast_unlock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
12120                                $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
12121     // If unlocking was successful, crx should indicate 'EQ'.
12122     // The compiler generates a branch to the runtime call to
12123     // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
12124   %}
12125   ins_pipe(pipe_class_compare);
12126 %}
12127 
12128 // Align address.
12129 instruct align_addr(iRegPdst dst, iRegPsrc src, immLnegpow2 mask) %{
12130   match(Set dst (CastX2P (AndL (CastP2X src) mask)));
12131 
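  // The mask is a negative power of two; clrrdi clears the log2(-mask)
  // low-order address bits (e.g. mask == -8 clears the low three bits).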
12132   format %{ "ANDDI   $dst, $src, $mask \t// next aligned address" %}
12133   size(4);
12134   ins_encode %{
12135     __ clrrdi($dst$$Register, $src$$Register, log2i_exact(-(julong)$mask$$constant));
12136   %}
12137   ins_pipe(pipe_class_default);
12138 %}
12139 
12140 // Array size computation.
12141 instruct array_size(iRegLdst dst, iRegPsrc end, iRegPsrc start) %{
12142   match(Set dst (SubL (CastP2X end) (CastP2X start)));
12143 
12144   format %{ "SUB     $dst, $end, $start \t// array size in bytes" %}
12145   size(4);
12146   ins_encode %{
12147     __ subf($dst$$Register, $start$$Register, $end$$Register);
12148   %}
12149   ins_pipe(pipe_class_default);
12150 %}
12151 
12152 // Clear-array with constant short array length. The versions below can use dcbz with cnt > 30.
12153 instruct inlineCallClearArrayShort(immLmax30 cnt, rarg2RegP base, Universe dummy, regCTR ctr) %{
12154   match(Set dummy (ClearArray cnt base));
12155   effect(USE_KILL base, KILL ctr);
12156   ins_cost(2 * MEMORY_REF_COST);
12157 
12158   format %{ "ClearArray $cnt, $base" %}
12159   ins_encode %{
12160     __ clear_memory_constlen($base$$Register, $cnt$$constant, R0); // kills base, R0
12161   %}
12162   ins_pipe(pipe_class_default);
12163 %}
12164 
12165 // Clear-array with constant large array length.
12166 instruct inlineCallClearArrayLarge(immL cnt, rarg2RegP base, Universe dummy, iRegLdst tmp, regCTR ctr) %{
12167   match(Set dummy (ClearArray cnt base));
12168   effect(USE_KILL base, TEMP tmp, KILL ctr);
12169   ins_cost(3 * MEMORY_REF_COST);
12170 
12171   format %{ "ClearArray $cnt, $base \t// KILL $tmp" %}
12172   ins_encode %{
12173     __ clear_memory_doubleword($base$$Register, $tmp$$Register, R0, $cnt$$constant); // kills base, R0
12174   %}
12175   ins_pipe(pipe_class_default);
12176 %}
12177 
12178 // Clear-array with dynamic array length.
12179 instruct inlineCallClearArray(rarg1RegL cnt, rarg2RegP base, Universe dummy, regCTR ctr) %{
12180   match(Set dummy (ClearArray cnt base));
12181   effect(USE_KILL cnt, USE_KILL base, KILL ctr);
12182   ins_cost(4 * MEMORY_REF_COST);
12183 
12184   format %{ "ClearArray $cnt, $base" %}
12185   ins_encode %{
12186     __ clear_memory_doubleword($base$$Register, $cnt$$Register, R0); // kills cnt, base, R0
12187   %}
12188   ins_pipe(pipe_class_default);
12189 %}
12190 
12191 instruct string_compareL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
12192                          iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
12193   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
12194   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
12195   effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
12196   ins_cost(300);
12197   format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
12198   ins_encode %{
12199     __ string_compare($str1$$Register, $str2$$Register,
12200                       $cnt1$$Register, $cnt2$$Register,
12201                       $tmp$$Register,
12202                       $result$$Register, StrIntrinsicNode::LL);
12203   %}
12204   ins_pipe(pipe_class_default);
12205 %}
12206 
12207 instruct string_compareU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
12208                          iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
12209   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
12210   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
12211   effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
12212   ins_cost(300);
12213   format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
12214   ins_encode %{
12215     __ string_compare($str1$$Register, $str2$$Register,
12216                       $cnt1$$Register, $cnt2$$Register,
12217                       $tmp$$Register,
12218                       $result$$Register, StrIntrinsicNode::UU);
12219   %}
12220   ins_pipe(pipe_class_default);
12221 %}
12222 
12223 instruct string_compareLU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
12224                           iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
12225   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
12226   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
12227   effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
12228   ins_cost(300);
12229   format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
12230   ins_encode %{
12231     __ string_compare($str1$$Register, $str2$$Register,
12232                       $cnt1$$Register, $cnt2$$Register,
12233                       $tmp$$Register,
12234                       $result$$Register, StrIntrinsicNode::LU);
12235   %}
12236   ins_pipe(pipe_class_default);
12237 %}
12238 
12239 instruct string_compareUL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
12240                           iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
12241   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
12242   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
12243   effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
12244   ins_cost(300);
12245   format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
12246   ins_encode %{
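    // Note: operands are passed swapped (str2/cnt2 first); the shared
    // string_compare macro handles the UL case with the operands in this order.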
12247     __ string_compare($str2$$Register, $str1$$Register,
12248                       $cnt2$$Register, $cnt1$$Register,
12249                       $tmp$$Register,
12250                       $result$$Register, StrIntrinsicNode::UL);
12251   %}
12252   ins_pipe(pipe_class_default);
12253 %}
12254 
12255 instruct string_equalsL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt, iRegIdst result,
12256                         iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
12257   predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
12258   match(Set result (StrEquals (Binary str1 str2) cnt));
12259   effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP tmp, KILL ctr, KILL cr0);
12260   ins_cost(300);
12261   format %{ "String Equals byte[] $str1,$str2,$cnt -> $result \t// KILL $tmp" %}
12262   ins_encode %{
12263     __ array_equals(false, $str1$$Register, $str2$$Register,
12264                     $cnt$$Register, $tmp$$Register,
12265                     $result$$Register, true /* byte */);
12266   %}
12267   ins_pipe(pipe_class_default);
12268 %}
12269 
12270 instruct array_equalsB(rarg1RegP ary1, rarg2RegP ary2, iRegIdst result,
12271                        iRegIdst tmp1, iRegIdst tmp2, regCTR ctr, flagsRegCR0 cr0, flagsRegCR0 cr1) %{
12272   predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
12273   match(Set result (AryEq ary1 ary2));
12274   effect(TEMP_DEF result, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0, KILL cr1);
12275   ins_cost(300);
12276   format %{ "Array Equals $ary1,$ary2 -> $result \t// KILL $tmp1,$tmp2" %}
12277   ins_encode %{
12278     __ array_equals(true, $ary1$$Register, $ary2$$Register,
12279                     $tmp1$$Register, $tmp2$$Register,
12280                     $result$$Register, true /* byte */);
12281   %}
12282   ins_pipe(pipe_class_default);
12283 %}
12284 
12285 instruct array_equalsC(rarg1RegP ary1, rarg2RegP ary2, iRegIdst result,
12286                        iRegIdst tmp1, iRegIdst tmp2, regCTR ctr, flagsRegCR0 cr0, flagsRegCR0 cr1) %{
12287   predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
12288   match(Set result (AryEq ary1 ary2));
12289   effect(TEMP_DEF result, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0, KILL cr1);
12290   ins_cost(300);
12291   format %{ "Array Equals $ary1,$ary2 -> $result \t// KILL $tmp1,$tmp2" %}
12292   ins_encode %{
12293     __ array_equals(true, $ary1$$Register, $ary2$$Register,
12294                     $tmp1$$Register, $tmp2$$Register,
12295                     $result$$Register, false /* byte */);
12296   %}
12297   ins_pipe(pipe_class_default);
12298 %}
12299 
12300 instruct indexOf_imm1_char_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12301                              immP needleImm, immL offsetImm, immI_1 needlecntImm,
12302                              iRegIdst tmp1, iRegIdst tmp2,
12303                              flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12304   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
12305   effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12306   // Required for EA: check if it is still a type_array.
12307   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
12308   ins_cost(150);
12309 
12310   format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
12311             "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
12312 
12313   ins_encode %{
12314     immPOper *needleOper = (immPOper *)$needleImm;
12315     const TypeOopPtr *t = needleOper->type()->isa_oopptr();
12316     ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
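    // Assemble the UTF-16 code unit from the needle's first two bytes,
    // respecting the platform byte order.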
12317     jchar chr;
12318 #ifdef VM_LITTLE_ENDIAN
12319     chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
12320            ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
12321 #else
12322     chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
12323            ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
12324 #endif
12325     __ string_indexof_char($result$$Register,
12326                            $haystack$$Register, $haycnt$$Register,
12327                            R0, chr,
12328                            $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
12329   %}
12330   ins_pipe(pipe_class_compare);
12331 %}
12332 
12333 instruct indexOf_imm1_char_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12334                              immP needleImm, immL offsetImm, immI_1 needlecntImm,
12335                              iRegIdst tmp1, iRegIdst tmp2,
12336                              flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12337   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
12338   effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12339   // Required for EA: check if it is still a type_array.
12340   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
12341   ins_cost(150);
12342 
12343   format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
12344             "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
12345 
12346   ins_encode %{
12347     immPOper *needleOper = (immPOper *)$needleImm;
12348     const TypeOopPtr *t = needleOper->type()->isa_oopptr();
12349     ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
12350     jchar chr = (jchar)needle_values->element_value(0).as_byte();
12351     __ string_indexof_char($result$$Register,
12352                            $haystack$$Register, $haycnt$$Register,
12353                            R0, chr,
12354                            $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
12355   %}
12356   ins_pipe(pipe_class_compare);
12357 %}
12358 
12359 instruct indexOf_imm1_char_UL(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12360                               immP needleImm, immL offsetImm, immI_1 needlecntImm,
12361                               iRegIdst tmp1, iRegIdst tmp2,
12362                               flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12363   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
12364   effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12365   // Required for EA: check if it is still a type_array.
12366   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
12367   ins_cost(150);
12368 
12369   format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
12370             "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
12371 
12372   ins_encode %{
12373     immPOper *needleOper = (immPOper *)$needleImm;
12374     const TypeOopPtr *t = needleOper->type()->isa_oopptr();
12375     ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
12376     jchar chr = (jchar)needle_values->element_value(0).as_byte();
12377     __ string_indexof_char($result$$Register,
12378                            $haystack$$Register, $haycnt$$Register,
12379                            R0, chr,
12380                            $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
12381   %}
12382   ins_pipe(pipe_class_compare);
12383 %}
12384 
12385 instruct indexOf_imm1_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12386                         rscratch2RegP needle, immI_1 needlecntImm,
12387                         iRegIdst tmp1, iRegIdst tmp2,
12388                         flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12389   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
12390   effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12391   // Required for EA: check if it is still a type_array.
12392   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU &&
12393             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
12394             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
12395   ins_cost(180);
12396 
12397   format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
12398             " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
12399   ins_encode %{
12400     Node *ndl = in(operand_index($needle));  // The node that defines needle.
12401     ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
12402     guarantee(needle_values, "sanity");
12403     jchar chr;
12404 #ifdef VM_LITTLE_ENDIAN
12405     chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
12406            ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
12407 #else
12408     chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
12409            ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
12410 #endif
12411     __ string_indexof_char($result$$Register,
12412                            $haystack$$Register, $haycnt$$Register,
12413                            R0, chr,
12414                            $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
12415   %}
12416   ins_pipe(pipe_class_compare);
12417 %}
12418 
12419 instruct indexOf_imm1_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12420                         rscratch2RegP needle, immI_1 needlecntImm,
12421                         iRegIdst tmp1, iRegIdst tmp2,
12422                         flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12423   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
12424   effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12425   // Required for EA: check if it is still a type_array.
12426   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL &&
12427             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
12428             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
12429   ins_cost(180);
12430 
12431   format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
12432             " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
12433   ins_encode %{
12434     Node *ndl = in(operand_index($needle));  // The node that defines needle.
12435     ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
12436     guarantee(needle_values, "sanity");
12437     jchar chr = (jchar)needle_values->element_value(0).as_byte();
12438     __ string_indexof_char($result$$Register,
12439                            $haystack$$Register, $haycnt$$Register,
12440                            R0, chr,
12441                            $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
12442   %}
12443   ins_pipe(pipe_class_compare);
12444 %}
12445 
12446 instruct indexOf_imm1_UL(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12447                          rscratch2RegP needle, immI_1 needlecntImm,
12448                          iRegIdst tmp1, iRegIdst tmp2,
12449                          flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12450   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
12451   effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12452   // Required for EA: check if it is still a type_array.
12453   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL &&
12454             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
12455             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
12456   ins_cost(180);
12457 
12458   format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
12459             " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
12460   ins_encode %{
12461     Node *ndl = in(operand_index($needle));  // The node that defines needle.
12462     ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
12463     guarantee(needle_values, "sanity");
12464     jchar chr = (jchar)needle_values->element_value(0).as_byte();
12465     __ string_indexof_char($result$$Register,
12466                            $haystack$$Register, $haycnt$$Register,
12467                            R0, chr,
12468                            $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
12469   %}
12470   ins_pipe(pipe_class_compare);
12471 %}
12472 
12473 instruct indexOfChar_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12474                        iRegIsrc ch, iRegIdst tmp1, iRegIdst tmp2,
12475                        flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12476   match(Set result (StrIndexOfChar (Binary haystack haycnt) ch));
12477   effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12478   predicate(((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
12479   ins_cost(180);
12480 
12481   format %{ "StringUTF16 IndexOfChar $haystack[0..$haycnt], $ch"
12482             " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
12483   ins_encode %{
12484     __ string_indexof_char($result$$Register,
12485                            $haystack$$Register, $haycnt$$Register,
12486                            $ch$$Register, 0 /* this is not used if the character is already in a register */,
12487                            $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
12488   %}
12489   ins_pipe(pipe_class_compare);
12490 %}
12491 
12492 instruct indexOfChar_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
12493                        iRegIsrc ch, iRegIdst tmp1, iRegIdst tmp2,
12494                        flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
12495   match(Set result (StrIndexOfChar (Binary haystack haycnt) ch));
12496   effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
12497   predicate(((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
12498   ins_cost(180);
12499 
12500   format %{ "StringLatin1 IndexOfChar $haystack[0..$haycnt], $ch"
12501             " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
12502   ins_encode %{
12503     __ string_indexof_char($result$$Register,
12504                            $haystack$$Register, $haycnt$$Register,
12505                            $ch$$Register, 0 /* this is not used if the character is already in a register */,
12506                            $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
12507   %}
12508   ins_pipe(pipe_class_compare);
12509 %}
12510 
12511 instruct indexOf_imm_U(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
12512                        iRegPsrc needle, uimmI15 needlecntImm,
12513                        iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
12514                        flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
12515   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
12516   effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
12517          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
12518   // Required for EA: check if it is still a type_array.
12519   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU &&
12520             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
12521             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
12522   ins_cost(250);
12523 
12524   format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
12525             " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
12526   ins_encode %{
12527     Node *ndl = in(operand_index($needle));  // The node that defines needle.
12528     ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
12529 
12530     __ string_indexof($result$$Register,
12531                       $haystack$$Register, $haycnt$$Register,
12532                       $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
12533                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UU);
12534   %}
12535   ins_pipe(pipe_class_compare);
12536 %}
12537 
12538 instruct indexOf_imm_L(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
12539                        iRegPsrc needle, uimmI15 needlecntImm,
12540                        iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
12541                        flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
12542   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
12543   effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
12544          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
12545   // Required for EA: check if it is still a type_array.
12546   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL &&
12547             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
12548             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
12549   ins_cost(250);
12550 
12551   format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
12552             " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
12553   ins_encode %{
12554     Node *ndl = in(operand_index($needle));  // The node that defines needle.
12555     ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
12556 
12557     __ string_indexof($result$$Register,
12558                       $haystack$$Register, $haycnt$$Register,
12559                       $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
12560                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::LL);
12561   %}
12562   ins_pipe(pipe_class_compare);
12563 %}
12564 
12565 instruct indexOf_imm_UL(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
12566                         iRegPsrc needle, uimmI15 needlecntImm,
12567                         iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
12568                         flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
12569   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
12570   effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
12571          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
12572   // Required for EA: check if it is still a type_array.
12573   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL &&
12574             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
12575             n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
12576   ins_cost(250);
12577 
12578   format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
12579             " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
12580   ins_encode %{
12581     Node *ndl = in(operand_index($needle));  // The node that defines needle.
12582     ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
12583 
12584     __ string_indexof($result$$Register,
12585                       $haystack$$Register, $haycnt$$Register,
12586                       $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
12587                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UL);
12588   %}
12589   ins_pipe(pipe_class_compare);
12590 %}
12591 
12592 instruct indexOf_U(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
12593                    iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
12594                    flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
12595   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
12596   effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
12597          TEMP_DEF result,
12598          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
12599   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
12600   ins_cost(300);
12601 
12602   format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
12603              " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
12604   ins_encode %{
12605     __ string_indexof($result$$Register,
12606                       $haystack$$Register, $haycnt$$Register,
12607                       $needle$$Register, nullptr, $needlecnt$$Register, 0,  // needlecnt not constant.
12608                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UU);
12609   %}
12610   ins_pipe(pipe_class_compare);
12611 %}
12612 
12613 instruct indexOf_L(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
12614                    iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
12615                    flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
12616   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
12617   effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
12618          TEMP_DEF result,
12619          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
12620   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
12621   ins_cost(300);
12622 
12623   format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
12624              " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
12625   ins_encode %{
12626     __ string_indexof($result$$Register,
12627                       $haystack$$Register, $haycnt$$Register,
12628                       $needle$$Register, nullptr, $needlecnt$$Register, 0,  // needlecnt not constant.
12629                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::LL);
12630   %}
12631   ins_pipe(pipe_class_compare);
12632 %}
12633 
12634 instruct indexOf_UL(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
12635                     iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
12636                     flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
12637   match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
12638   effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
12639          TEMP_DEF result,
12640          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
12641   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
12642   ins_cost(300);
12643 
12644   format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
12645              " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
12646   ins_encode %{
12647     __ string_indexof($result$$Register,
12648                       $haystack$$Register, $haycnt$$Register,
12649                       $needle$$Register, nullptr, $needlecnt$$Register, 0,  // needlecnt not constant.
12650                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UL);
12651   %}
12652   ins_pipe(pipe_class_compare);
12653 %}
12654 
12655 // char[] to byte[] compression
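// Shares the encode_iso_array macro; the final 'false' argument selects
// ISO-8859-1 handling (compare encode_ascii_array below, which passes true).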
12656 instruct string_compress(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
12657                          iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
12658   match(Set result (StrCompressedCopy src (Binary dst len)));
12659   effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
12660          USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
12661   ins_cost(300);
12662   format %{ "String Compress $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
12663   ins_encode %{
12664     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register, $tmp2$$Register,
12665                         $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, $result$$Register, false);
12666   %}
12667   ins_pipe(pipe_class_default);
12668 %}
12669 
12670 // byte[] to char[] inflation
12671 instruct string_inflate(Universe dummy, rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegLdst tmp1,
12672                         iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
12673   match(Set dummy (StrInflatedCopy src (Binary dst len)));
12674   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
12675   ins_cost(300);
12676   format %{ "String Inflate $src,$dst,$len \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
12677   ins_encode %{
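    // Bulk-inflate first; rldicl_ leaves len & 7 in $tmp1 (setting CR0), and
    // any remaining characters are inflated by the second call.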
12678     Label Ldone;
12679     __ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
12680                          $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
12681     __ rldicl_($tmp1$$Register, $len$$Register, 0, 64-3); // Remaining characters.
12682     __ beq(CCR0, Ldone);
12683     __ string_inflate($src$$Register, $dst$$Register, $tmp1$$Register, $tmp2$$Register);
12684     __ bind(Ldone);
12685   %}
12686   ins_pipe(pipe_class_default);
12687 %}
12688 
12689 // StringCoding.java intrinsics
12690 instruct count_positives(iRegPsrc ary1, iRegIsrc len, iRegIdst result, iRegLdst tmp1, iRegLdst tmp2,
12691                          regCTR ctr, flagsRegCR0 cr0)
12692 %{
12693   match(Set result (CountPositives ary1 len));
12694   effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0);
12695   ins_cost(300);
12696   format %{ "count positives byte[] $ary1,$len -> $result \t// KILL $tmp1, $tmp2" %}
12697   ins_encode %{
12698     __ count_positives($ary1$$Register, $len$$Register, $result$$Register,
12699                        $tmp1$$Register, $tmp2$$Register);
12700   %}
12701   ins_pipe(pipe_class_default);
12702 %}
12703 
12704 // encode char[] to byte[] in ISO_8859_1
12705 instruct encode_iso_array(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
12706                           iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
12707   predicate(!((EncodeISOArrayNode*)n)->is_ascii());
12708   match(Set result (EncodeISOArray src (Binary dst len)));
12709   effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
12710          USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
12711   ins_cost(300);
12712   format %{ "Encode iso array $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
12713   ins_encode %{
12714     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register, $tmp2$$Register,
12715                         $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, $result$$Register, false);
12716   %}
12717   ins_pipe(pipe_class_default);
12718 %}
12719 
12720 // encode char[] to byte[] in ASCII
12721 instruct encode_ascii_array(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
12722                           iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
12723   predicate(((EncodeISOArrayNode*)n)->is_ascii());
12724   match(Set result (EncodeISOArray src (Binary dst len)));
12725   effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
12726          USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
12727   ins_cost(300);
12728   format %{ "Encode ascii array $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
12729   ins_encode %{
12730     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register, $tmp2$$Register,
12731                         $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, $result$$Register, true);
12732   %}
12733   ins_pipe(pipe_class_default);
12734 %}
12735 
12736 
12737 //---------- Min/Max Instructions ---------------------------------------------
12738 
12739 instruct minI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
12740   match(Set dst (MinI src1 src2));
12741   ins_cost(DEFAULT_COST*6);
12742 
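  // Branch-free min: dst = src1 + ((src2 - src1) & signmask(src2 - src1)),
  // i.e. src1 plus the difference only when the difference is negative.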
12743   expand %{
12744     iRegLdst src1s;
12745     iRegLdst src2s;
12746     iRegLdst diff;
12747     iRegLdst sm;
12748     iRegLdst doz; // difference or zero
12749     convI2L_reg(src1s, src1); // Ensure proper sign extension.
12750     convI2L_reg(src2s, src2); // Ensure proper sign extension.
12751     subL_reg_reg(diff, src2s, src1s);
12752     // Need to consider >=33 bit result, therefore we need signmaskL.
12753     signmask64L_regL(sm, diff);
12754     andL_reg_reg(doz, diff, sm); // <=0
12755     addI_regL_regL(dst, doz, src1s);
12756   %}
12757 %}
12758 
12759 instruct minI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
12760   match(Set dst (MinI src1 src2));
12761   effect(KILL cr0);
12762   predicate(VM_Version::has_isel());
12763   ins_cost(DEFAULT_COST*2);
12764 
12765   ins_encode %{
12766     __ cmpw(CCR0, $src1$$Register, $src2$$Register);
12767     __ isel($dst$$Register, CCR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
12768   %}
12769   ins_pipe(pipe_class_default);
12770 %}
12771 
12772 instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
12773   match(Set dst (MaxI src1 src2));
12774   ins_cost(DEFAULT_COST*6);
12775 
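  // Branch-free max: dst = src1 + ((src2 - src1) & ~signmask(src2 - src1)),
  // i.e. src1 plus the difference only when the difference is non-negative.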
12776   expand %{
12777     iRegLdst src1s;
12778     iRegLdst src2s;
12779     iRegLdst diff;
12780     iRegLdst sm;
12781     iRegLdst doz; // difference or zero
12782     convI2L_reg(src1s, src1); // Ensure proper sign extension.
12783     convI2L_reg(src2s, src2); // Ensure proper sign extension.
12784     subL_reg_reg(diff, src2s, src1s);
12785     // Need to consider >=33 bit result, therefore we need signmaskL.
12786     signmask64L_regL(sm, diff);
12787     andcL_reg_reg(doz, diff, sm); // >=0
12788     addI_regL_regL(dst, doz, src1s);
12789   %}
12790 %}
12791 
12792 instruct maxI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
12793   match(Set dst (MaxI src1 src2));
12794   effect(KILL cr0);
12795   predicate(VM_Version::has_isel());
12796   ins_cost(DEFAULT_COST*2);
12797 
12798   ins_encode %{
12799     __ cmpw(CCR0, $src1$$Register, $src2$$Register);
12800     __ isel($dst$$Register, CCR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
12801   %}
12802   ins_pipe(pipe_class_default);
12803 %}
12804 
12805 //---------- Population Count Instructions ------------------------------------
12806 
12807 // Popcnt for Power7.
12808 instruct popCountI(iRegIdst dst, iRegIsrc src) %{
12809   match(Set dst (PopCountI src));
12810   predicate(UsePopCountInstruction && VM_Version::has_popcntw());
12811   ins_cost(DEFAULT_COST);
12812 
12813   format %{ "POPCNTW $dst, $src" %}
12814   size(4);
12815   ins_encode %{
12816     __ popcntw($dst$$Register, $src$$Register);
12817   %}
12818   ins_pipe(pipe_class_default);
12819 %}
12820 
12821 // Popcnt for Power7.
12822 instruct popCountL(iRegIdst dst, iRegLsrc src) %{
12823   predicate(UsePopCountInstruction && VM_Version::has_popcntw());
12824   match(Set dst (PopCountL src));
12825   ins_cost(DEFAULT_COST);
12826 
12827   format %{ "POPCNTD $dst, $src" %}
12828   size(4);
12829   ins_encode %{
12830     __ popcntd($dst$$Register, $src$$Register);
12831   %}
12832   ins_pipe(pipe_class_default);
12833 %}
12834 
12835 instruct countLeadingZerosI(iRegIdst dst, iRegIsrc src) %{
12836   match(Set dst (CountLeadingZerosI src));
12837   predicate(UseCountLeadingZerosInstructionsPPC64);  // See Matcher::match_rule_supported.
12838   ins_cost(DEFAULT_COST);
12839 
12840   format %{ "CNTLZW  $dst, $src" %}
12841   size(4);
12842   ins_encode %{
12843     __ cntlzw($dst$$Register, $src$$Register);
12844   %}
12845   ins_pipe(pipe_class_default);
12846 %}
12847 
12848 instruct countLeadingZerosL(iRegIdst dst, iRegLsrc src) %{
12849   match(Set dst (CountLeadingZerosL src));
12850   predicate(UseCountLeadingZerosInstructionsPPC64);  // See Matcher::match_rule_supported.
12851   ins_cost(DEFAULT_COST);
12852 
12853   format %{ "CNTLZD  $dst, $src" %}
12854   size(4);
12855   ins_encode %{
12856     __ cntlzd($dst$$Register, $src$$Register);
12857   %}
12858   ins_pipe(pipe_class_default);
12859 %}
12860 
12861 instruct countLeadingZerosP(iRegIdst dst, iRegPsrc src) %{
12862   // no match-rule, false predicate
12863   effect(DEF dst, USE src);
12864   predicate(false);
12865 
12866   format %{ "CNTLZD  $dst, $src" %}
12867   size(4);
12868   ins_encode %{
12869     __ cntlzd($dst$$Register, $src$$Register);
12870   %}
12871   ins_pipe(pipe_class_default);
12872 %}
12873 
12874 instruct countTrailingZerosI_Ex(iRegIdst dst, iRegIsrc src) %{
12875   match(Set dst (CountTrailingZerosI src));
12876   predicate(UseCountLeadingZerosInstructionsPPC64 && !UseCountTrailingZerosInstructionsPPC64);
12877   ins_cost(DEFAULT_COST);
12878 
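  // Uses ctz(x) = 32 - clz(~x & (x - 1)): subtracting 1 flips the trailing
  // zeros, and andc isolates exactly those flipped bits.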
12879   expand %{
12880     immI16 imm1 %{ (int)-1 %}
12881     immI16 imm2 %{ (int)32 %}
12882     immI_minus1 m1 %{ -1 %}
12883     iRegIdst tmpI1;
12884     iRegIdst tmpI2;
12885     iRegIdst tmpI3;
12886     addI_reg_imm16(tmpI1, src, imm1);
12887     andcI_reg_reg(tmpI2, src, m1, tmpI1);
12888     countLeadingZerosI(tmpI3, tmpI2);
12889     subI_imm16_reg(dst, imm2, tmpI3);
12890   %}
12891 %}
12892 
12893 instruct countTrailingZerosI_cnttzw(iRegIdst dst, iRegIsrc src) %{
12894   match(Set dst (CountTrailingZerosI src));
12895   predicate(UseCountTrailingZerosInstructionsPPC64);
12896   ins_cost(DEFAULT_COST);
12897 
12898   format %{ "CNTTZW  $dst, $src" %}
12899   size(4);
12900   ins_encode %{
12901     __ cnttzw($dst$$Register, $src$$Register);
12902   %}
12903   ins_pipe(pipe_class_default);
12904 %}
12905 
12906 instruct countTrailingZerosL_Ex(iRegIdst dst, iRegLsrc src) %{
12907   match(Set dst (CountTrailingZerosL src));
12908   predicate(UseCountLeadingZerosInstructionsPPC64 && !UseCountTrailingZerosInstructionsPPC64);
12909   ins_cost(DEFAULT_COST);
12910 
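  // Same clz-based expansion as countTrailingZerosI_Ex above, using
  // ctz(x) = 64 - clz(~x & (x - 1)) for 64-bit operands.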
12911   expand %{
12912     immL16 imm1 %{ (long)-1 %}
12913     immI16 imm2 %{ (int)64 %}
12914     iRegLdst tmpL1;
12915     iRegLdst tmpL2;
12916     iRegIdst tmpL3;
12917     addL_reg_imm16(tmpL1, src, imm1);
12918     andcL_reg_reg(tmpL2, tmpL1, src);
12919     countLeadingZerosL(tmpL3, tmpL2);
12920     subI_imm16_reg(dst, imm2, tmpL3);
12921   %}
12922 %}
12923 
12924 instruct countTrailingZerosL_cnttzd(iRegIdst dst, iRegLsrc src) %{
12925   match(Set dst (CountTrailingZerosL src));
12926   predicate(UseCountTrailingZerosInstructionsPPC64);
12927   ins_cost(DEFAULT_COST);
12928 
12929   format %{ "CNTTZD  $dst, $src" %}
12930   size(4);
12931   ins_encode %{
12932     __ cnttzd($dst$$Register, $src$$Register);
12933   %}
12934   ins_pipe(pipe_class_default);
12935 %}
12936 
12937 // Expand nodes for byte_reverse_int.
12938 instruct insrwi_a(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
12939   effect(DEF dst, USE src, USE pos, USE shift);
12940   predicate(false);
12941 
12942   format %{ "INSRWI  $dst, $src, $pos, $shift" %}
12943   size(4);
12944   ins_encode %{
12945     __ insrwi($dst$$Register, $src$$Register, $shift$$constant, $pos$$constant);
12946   %}
12947   ins_pipe(pipe_class_default);
12948 %}
12949 
12950 // As insrwi_a, but with USE_DEF.
12951 instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
12952   effect(USE_DEF dst, USE src, USE pos, USE shift);
12953   predicate(false);
12954 
12955   format %{ "INSRWI  $dst, $src, $pos, $shift" %}
12956   size(4);
12957   ins_encode %{
12958     __ insrwi($dst$$Register, $src$$Register, $shift$$constant, $pos$$constant);
12959   %}
12960   ins_pipe(pipe_class_default);
12961 %}
12962 
12963 // Only slightly faster than the Java implementation.
12964 instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
12965   match(Set dst (ReverseBytesI src));
12966   predicate(!UseByteReverseInstructions);
12967   ins_cost(7*DEFAULT_COST);
12968 
12969   expand %{
12970     immI16 imm24 %{ (int) 24 %}
12971     immI16 imm16 %{ (int) 16 %}
12972     immI16  imm8 %{ (int)  8 %}
12973     immI16  imm4 %{ (int)  4 %}
12974     immI16  imm0 %{ (int)  0 %}
12975     iRegLdst tmpI1;
12976     iRegLdst tmpI2;
12977     iRegLdst tmpI3;
12978 
12979     urShiftI_reg_imm(tmpI1, src, imm24); // src  : |a|b|c|d|  tmpI1: |0|0|0|a|
12980     insrwi_a(dst, tmpI1, imm24, imm8);   // dst  : |?|?|?|a|
12981     urShiftI_reg_imm(tmpI2, src, imm16); // tmpI2: |0|0|a|b|
12982     insrwi(dst, tmpI2, imm8, imm16);     // dst  : |?|a|b|a|
12983     urShiftI_reg_imm(tmpI3, src, imm8);  // tmpI3: |0|a|b|c|
12984     insrwi(dst, tmpI3, imm8, imm8);      // dst  : |?|c|b|a|
12985     insrwi(dst, src, imm0, imm8);        // dst  : |d|c|b|a|
12986   %}
12987 %}
12988 
12989 instruct bytes_reverse_int_vec(iRegIdst dst, iRegIsrc src, vecX tmpV) %{
12990   match(Set dst (ReverseBytesI src));
12991   predicate(UseVectorByteReverseInstructionsPPC64);
12992   effect(TEMP tmpV);
12993   ins_cost(DEFAULT_COST*3);
12994   size(12);
12995   format %{ "MTVSRWZ $tmpV, $src\n"
12996             "\tXXBRW   $tmpV, $tmpV\n"
12997             "\tMFVSRWZ $dst, $tmpV" %}
12998 
12999   ins_encode %{
13000     __ mtvsrwz($tmpV$$VectorSRegister, $src$$Register);
13001     __ xxbrw($tmpV$$VectorSRegister, $tmpV$$VectorSRegister);
13002     __ mfvsrwz($dst$$Register, $tmpV$$VectorSRegister);
13003   %}
13004   ins_pipe(pipe_class_default);
13005 %}
13006 
13007 instruct bytes_reverse_int(iRegIdst dst, iRegIsrc src) %{
13008   match(Set dst (ReverseBytesI src));
13009   predicate(UseByteReverseInstructions);
13010   ins_cost(DEFAULT_COST);
13011   size(4);
13012 
13013   format %{ "BRW  $dst, $src" %}
13014 
13015   ins_encode %{
13016     __ brw($dst$$Register, $src$$Register);
13017   %}
13018   ins_pipe(pipe_class_default);
13019 %}
13020 
13021 instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
13022   match(Set dst (ReverseBytesL src));
13023   predicate(!UseByteReverseInstructions);
13024   ins_cost(15*DEFAULT_COST);
13025 
13026   expand %{
13027     immI16 imm56 %{ (int) 56 %}
13028     immI16 imm48 %{ (int) 48 %}
13029     immI16 imm40 %{ (int) 40 %}
13030     immI16 imm32 %{ (int) 32 %}
13031     immI16 imm24 %{ (int) 24 %}
13032     immI16 imm16 %{ (int) 16 %}
13033     immI16  imm8 %{ (int)  8 %}
13034     immI16  imm0 %{ (int)  0 %}
13035     iRegLdst tmpL1;
13036     iRegLdst tmpL2;
13037     iRegLdst tmpL3;
13038     iRegLdst tmpL4;
13039     iRegLdst tmpL5;
13040     iRegLdst tmpL6;
13041 
13042                                         // src   : |a|b|c|d|e|f|g|h|
13043     rldicl(tmpL1, src, imm8, imm24);    // tmpL1 : | | | |e|f|g|h|a|
13044     rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |a| | | |e|
13045     rldicl(tmpL3, tmpL2, imm32, imm0);  // tmpL3 : | | | |e| | | |a|
13046     rldicl(tmpL1, src, imm16, imm24);   // tmpL1 : | | | |f|g|h|a|b|
13047     rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |b| | | |f|
13048     rldicl(tmpL4, tmpL2, imm40, imm0);  // tmpL4 : | | |f| | | |b| |
13049     orL_reg_reg(tmpL5, tmpL3, tmpL4);   // tmpL5 : | | |f|e| | |b|a|
13050     rldicl(tmpL1, src, imm24, imm24);   // tmpL1 : | | | |g|h|a|b|c|
13051     rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |c| | | |g|
13052     rldicl(tmpL3, tmpL2, imm48, imm0);  // tmpL3 : | |g| | | |c| | |
13053     rldicl(tmpL1, src, imm32, imm24);   // tmpL1 : | | | |h|a|b|c|d|
13054     rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |d| | | |h|
13055     rldicl(tmpL4, tmpL2, imm56, imm0);  // tmpL4 : |h| | | |d| | | |
13056     orL_reg_reg(tmpL6, tmpL3, tmpL4);   // tmpL6 : |h|g| | |d|c| | |
13057     orL_reg_reg(dst, tmpL5, tmpL6);     // dst   : |h|g|f|e|d|c|b|a|
13058   %}
13059 %}
13060 
13061 instruct bytes_reverse_long_vec(iRegLdst dst, iRegLsrc src, vecX tmpV) %{
13062   match(Set dst (ReverseBytesL src));
13063   predicate(UseVectorByteReverseInstructionsPPC64);
13064   effect(TEMP tmpV);
13065   ins_cost(DEFAULT_COST*3);
13066   size(12);
13067   format %{ "MTVSRD  $tmpV, $src\n"
13068             "\tXXBRD   $tmpV, $tmpV\n"
13069             "\tMFVSRD  $dst, $tmpV" %}
13070 
13071   ins_encode %{
13072     __ mtvsrd($tmpV$$VectorSRegister, $src$$Register);
13073     __ xxbrd($tmpV$$VectorSRegister, $tmpV$$VectorSRegister);
13074     __ mfvsrd($dst$$Register, $tmpV$$VectorSRegister);
13075   %}
13076   ins_pipe(pipe_class_default);
13077 %}
13078 
13079 instruct bytes_reverse_long(iRegLdst dst, iRegLsrc src) %{
13080   match(Set dst (ReverseBytesL src));
13081   predicate(UseByteReverseInstructions);
13082   ins_cost(DEFAULT_COST);
13083   size(4);
13084 
13085   format %{ "BRD  $dst, $src" %}
13086 
13087   ins_encode %{
13088     __ brd($dst$$Register, $src$$Register);
13089   %}
13090   ins_pipe(pipe_class_default);
13091 %}
13092 
13093 instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
13094   match(Set dst (ReverseBytesUS src));
13095   predicate(!UseByteReverseInstructions);
13096   ins_cost(2*DEFAULT_COST);
13097 
13098   expand %{
13099     immI16  imm16 %{ (int) 16 %}
13100     immI16   imm8 %{ (int)  8 %}
13101 
13102     urShiftI_reg_imm(dst, src, imm8);
13103     insrwi(dst, src, imm16, imm8);
13104   %}
13105 %}
13106 
13107 instruct bytes_reverse_ushort(iRegIdst dst, iRegIsrc src) %{
13108   match(Set dst (ReverseBytesUS src));
13109   predicate(UseByteReverseInstructions);
13110   ins_cost(DEFAULT_COST);
13111   size(4);
13112 
13113   format %{ "BRH  $dst, $src" %}
13114 
13115   ins_encode %{
13116     __ brh($dst$$Register, $src$$Register);
13117   %}
13118   ins_pipe(pipe_class_default);
13119 %}
13120 
13121 instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
13122   match(Set dst (ReverseBytesS src));
13123   predicate(!UseByteReverseInstructions);
13124   ins_cost(3*DEFAULT_COST);
13125 
13126   expand %{
13127     immI16  imm16 %{ (int) 16 %}
13128     immI16   imm8 %{ (int)  8 %}
13129     iRegLdst tmpI1;
13130 
13131     urShiftI_reg_imm(tmpI1, src, imm8);
13132     insrwi(tmpI1, src, imm16, imm8);
13133     extsh(dst, tmpI1);
13134   %}
13135 %}
13136 
13137 instruct bytes_reverse_short(iRegIdst dst, iRegIsrc src) %{
13138   match(Set dst (ReverseBytesS src));
13139   predicate(UseByteReverseInstructions);
13140   ins_cost(DEFAULT_COST);
13141   size(8);
13142 
13143   format %{ "BRH   $dst, $src\n\t"
13144             "EXTSH $dst, $dst" %}
13145 
13146   ins_encode %{
13147     __ brh($dst$$Register, $src$$Register);
13148     __ extsh($dst$$Register, $dst$$Register);
13149   %}
13150   ins_pipe(pipe_class_default);
13151 %}
13152 
13153 // Load Integer reversed byte order
13154 instruct loadI_reversed(iRegIdst dst, indirect mem) %{
13155   match(Set dst (ReverseBytesI (LoadI mem)));
13156   predicate(n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1)));
13157   ins_cost(MEMORY_REF_COST);
13158 
13159   size(4);
13160   ins_encode %{
13161     __ lwbrx($dst$$Register, $mem$$Register);
13162   %}
13163   ins_pipe(pipe_class_default);
13164 %}
13165 
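      // Acquire variant: the twi_0/isync sequence after the load acts as the
      // usual PPC load-acquire barrier (the never-taken trap establishes a
      // dependency on the loaded value, which isync then orders against
      // subsequent accesses). Same idea in the *_acquire variants below.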
13166 instruct loadI_reversed_acquire(iRegIdst dst, indirect mem) %{
13167   match(Set dst (ReverseBytesI (LoadI mem)));
13168   ins_cost(2 * MEMORY_REF_COST);
13169 
13170   size(12);
13171   ins_encode %{
13172     __ lwbrx($dst$$Register, $mem$$Register);
13173     __ twi_0($dst$$Register);
13174     __ isync();
13175   %}
13176   ins_pipe(pipe_class_default);
13177 %}
13178 
13179 // Load Long - aligned and reversed
13180 instruct loadL_reversed(iRegLdst dst, indirect mem) %{
13181   match(Set dst (ReverseBytesL (LoadL mem)));
13182   predicate(VM_Version::has_ldbrx() && (n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1))));
13183   ins_cost(MEMORY_REF_COST);
13184 
13185   size(4);
13186   ins_encode %{
13187     __ ldbrx($dst$$Register, $mem$$Register);
13188   %}
13189   ins_pipe(pipe_class_default);
13190 %}
13191 
13192 instruct loadL_reversed_acquire(iRegLdst dst, indirect mem) %{
13193   match(Set dst (ReverseBytesL (LoadL mem)));
13194   predicate(VM_Version::has_ldbrx());
13195   ins_cost(2 * MEMORY_REF_COST);
13196 
13197   size(12);
13198   ins_encode %{
13199     __ ldbrx($dst$$Register, $mem$$Register);
13200     __ twi_0($dst$$Register);
13201     __ isync();
13202   %}
13203   ins_pipe(pipe_class_default);
13204 %}
13205 
13206 // Load unsigned short / char reversed byte order
13207 instruct loadUS_reversed(iRegIdst dst, indirect mem) %{
13208   match(Set dst (ReverseBytesUS (LoadUS mem)));
13209   predicate(n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1)));
13210   ins_cost(MEMORY_REF_COST);
13211 
13212   size(4);
13213   ins_encode %{
13214     __ lhbrx($dst$$Register, $mem$$Register);
13215   %}
13216   ins_pipe(pipe_class_default);
13217 %}
13218 
13219 instruct loadUS_reversed_acquire(iRegIdst dst, indirect mem) %{
13220   match(Set dst (ReverseBytesUS (LoadUS mem)));
13221   ins_cost(2 * MEMORY_REF_COST);
13222 
13223   size(12);
13224   ins_encode %{
13225     __ lhbrx($dst$$Register, $mem$$Register);
13226     __ twi_0($dst$$Register);
13227     __ isync();
13228   %}
13229   ins_pipe(pipe_class_default);
13230 %}
13231 
13232 // Load short reversed byte order
13233 instruct loadS_reversed(iRegIdst dst, indirect mem) %{
13234   match(Set dst (ReverseBytesS (LoadS mem)));
13235   predicate(n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1)));
13236   ins_cost(MEMORY_REF_COST + DEFAULT_COST);
13237 
13238   size(8);
13239   ins_encode %{
13240     __ lhbrx($dst$$Register, $mem$$Register);
13241     __ extsh($dst$$Register, $dst$$Register);
13242   %}
13243   ins_pipe(pipe_class_default);
13244 %}
13245 
13246 instruct loadS_reversed_acquire(iRegIdst dst, indirect mem) %{
13247   match(Set dst (ReverseBytesS (LoadS mem)));
13248   ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
13249 
13250   size(16);
13251   ins_encode %{
13252     __ lhbrx($dst$$Register, $mem$$Register);
13253     __ twi_0($dst$$Register);
13254     __ extsh($dst$$Register, $dst$$Register);
13255     __ isync();
13256   %}
13257   ins_pipe(pipe_class_default);
13258 %}
13259 
13260 // Store Integer reversed byte order
13261 instruct storeI_reversed(iRegIsrc src, indirect mem) %{
13262   match(Set mem (StoreI mem (ReverseBytesI src)));
13263   ins_cost(MEMORY_REF_COST);
13264 
13265   size(4);
13266   ins_encode %{
13267     __ stwbrx($src$$Register, $mem$$Register);
13268   %}
13269   ins_pipe(pipe_class_default);
13270 %}
13271 
13272 // Store Long reversed byte order
13273 instruct storeL_reversed(iRegLsrc src, indirect mem) %{
13274   match(Set mem (StoreL mem (ReverseBytesL src)));
13275   predicate(VM_Version::has_stdbrx());
13276   ins_cost(MEMORY_REF_COST);
13277 
13278   size(4);
13279   ins_encode %{
13280     __ stdbrx($src$$Register, $mem$$Register);
13281   %}
13282   ins_pipe(pipe_class_default);
13283 %}
13284 
13285 // Store unsigned short / char reversed byte order
13286 instruct storeUS_reversed(iRegIsrc src, indirect mem) %{
13287   match(Set mem (StoreC mem (ReverseBytesUS src)));
13288   ins_cost(MEMORY_REF_COST);
13289 
13290   size(4);
13291   ins_encode %{
13292     __ sthbrx($src$$Register, $mem$$Register);
13293   %}
13294   ins_pipe(pipe_class_default);
13295 %}
13296 
13297 // Store short reversed byte order
13298 instruct storeS_reversed(iRegIsrc src, indirect mem) %{
13299   match(Set mem (StoreC mem (ReverseBytesS src)));
13300   ins_cost(MEMORY_REF_COST);
13301 
13302   size(4);
13303   ins_encode %{
13304     __ sthbrx($src$$Register, $mem$$Register);
13305   %}
13306   ins_pipe(pipe_class_default);
13307 %}
13308 
13309 instruct mtvsrwz(vecX temp1, iRegIsrc src) %{
13310   effect(DEF temp1, USE src);
13311 
13312   format %{ "MTVSRWZ $temp1, $src \t// Move to 16-byte register" %}
13313   size(4);
13314   ins_encode %{
13315     __ mtvsrwz($temp1$$VectorSRegister, $src$$Register);
13316   %}
13317   ins_pipe(pipe_class_default);
13318 %}
13319 
13320 instruct xxspltw(vecX dst, vecX src, immI8 imm1) %{
13321   effect(DEF dst, USE src, USE imm1);
13322 
13323   format %{ "XXSPLTW $dst, $src, $imm1 \t// Splat word" %}
13324   size(4);
13325   ins_encode %{
13326     __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant);
13327   %}
13328   ins_pipe(pipe_class_default);
13329 %}
13330 
13331 instruct xscvdpspn_regF(vecX dst, regF src) %{
13332   effect(DEF dst, USE src);
13333 
13334   format %{ "XSCVDPSPN $dst, $src \t// Convert scalar single precision to vector single precision" %}
13335   size(4);
13336   ins_encode %{
13337     __ xscvdpspn($dst$$VectorSRegister, $src$$FloatRegister->to_vsr());
13338   %}
13339   ins_pipe(pipe_class_default);
13340 %}
13341 
13342 //---------- Replicate Vector Instructions ------------------------------------
13343 
13344 // Insrdi does replicate if src == dst.
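      // E.g. repl32: inserting the 32 low-order bits of $dst at bit 0 copies
      // the low word into the high word,
      // 0x00000000_AABBCCDD -> 0xAABBCCDD_AABBCCDD.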
13345 instruct repl32(iRegLdst dst) %{
13346   predicate(false);
13347   effect(USE_DEF dst);
13348 
13349   format %{ "INSRDI  $dst, #0, $dst, #32 \t// replicate" %}
13350   size(4);
13351   ins_encode %{
13352     __ insrdi($dst$$Register, $dst$$Register, 32, 0);
13353   %}
13354   ins_pipe(pipe_class_default);
13355 %}
13356 
13357 // Insrdi does replicate if src == dst.
13358 instruct repl48(iRegLdst dst) %{
13359   predicate(false);
13360   effect(USE_DEF dst);
13361 
13362   format %{ "INSRDI  $dst, #0, $dst, #48 \t// replicate" %}
13363   size(4);
13364   ins_encode %{
13365     __ insrdi($dst$$Register, $dst$$Register, 48, 0);
13366   %}
13367   ins_pipe(pipe_class_default);
13368 %}
13369 
13370 // Insrdi does replicate if src == dst.
13371 instruct repl56(iRegLdst dst) %{
13372   predicate(false);
13373   effect(USE_DEF dst);
13374 
13375   format %{ "INSRDI  $dst, #0, $dst, #56 \t// replicate" %}
13376   size(4);
13377   ins_encode %{
13378     __ insrdi($dst$$Register, $dst$$Register, 56, 0);
13379   %}
13380   ins_pipe(pipe_class_default);
13381 %}
13382 
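      // Replicate a byte across a 64-bit GPR: each insrdi step doubles the
      // number of valid low-order copies
      // (xx..xxAB -> xx..xxABAB -> xxxxABABABAB -> ABABABABABABABAB).
      // The 4S and 2I variants below use the same chain with fewer steps.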
13383 instruct repl8B_reg_Ex(iRegLdst dst, iRegIsrc src) %{
13384   match(Set dst (Replicate src));
13385   predicate(n->as_Vector()->length() == 8 &&
13386             Matcher::vector_element_basic_type(n) == T_BYTE);
13387   expand %{
13388     moveReg(dst, src);
13389     repl56(dst);
13390     repl48(dst);
13391     repl32(dst);
13392   %}
13393 %}
13394 
13395 instruct repl8B_immI0(iRegLdst dst, immI_0 zero) %{
13396   match(Set dst (Replicate zero));
13397   predicate(n->as_Vector()->length() == 8 &&
13398             Matcher::vector_element_basic_type(n) == T_BYTE);
13399   format %{ "LI      $dst, #0 \t// replicate8B" %}
13400   size(4);
13401   ins_encode %{
13402     __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
13403   %}
13404   ins_pipe(pipe_class_default);
13405 %}
13406 
13407 instruct repl8B_immIminus1(iRegLdst dst, immI_minus1 src) %{
13408   match(Set dst (Replicate src));
13409   predicate(n->as_Vector()->length() == 8 &&
13410             Matcher::vector_element_basic_type(n) == T_BYTE);
13411   format %{ "LI      $dst, #-1 \t// replicate8B" %}
13412   size(4);
13413   ins_encode %{
13414     __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
13415   %}
13416   ins_pipe(pipe_class_default);
13417 %}
13418 
13419 instruct repl16B_reg_Ex(vecX dst, iRegIsrc src) %{
13420   match(Set dst (Replicate src));
13421   predicate(n->as_Vector()->length() == 16 &&
13422             Matcher::vector_element_basic_type(n) == T_BYTE);
13423 
13424   expand %{
13425     iRegLdst tmpL;
13426     vecX tmpV;
13427     immI8  imm1 %{ (int)  1 %}
13428     moveReg(tmpL, src);
13429     repl56(tmpL);
13430     repl48(tmpL);
13431     mtvsrwz(tmpV, tmpL);
13432     xxspltw(dst, tmpV, imm1);
13433   %}
13434 %}
13435 
13436 instruct repl16B_immI0(vecX dst, immI_0 zero) %{
13437   match(Set dst (Replicate zero));
13438   predicate(n->as_Vector()->length() == 16 &&
13439             Matcher::vector_element_basic_type(n) == T_BYTE);
13440 
13441   format %{ "XXLXOR      $dst, $zero \t// replicate16B" %}
13442   size(4);
13443   ins_encode %{
13444     __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
13445   %}
13446   ins_pipe(pipe_class_default);
13447 %}
13448 
13449 instruct repl16B_immIminus1(vecX dst, immI_minus1 src) %{
13450   match(Set dst (Replicate src));
13451   predicate(n->as_Vector()->length() == 16 &&
13452             Matcher::vector_element_basic_type(n) == T_BYTE);
13453 
13454   format %{ "XXLEQV      $dst, $src \t// replicate16B" %}
13455   size(4);
13456   ins_encode %{
13457     __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
13458   %}
13459   ins_pipe(pipe_class_default);
13460 %}
13461 
13462 instruct repl4S_reg_Ex(iRegLdst dst, iRegIsrc src) %{
13463   match(Set dst (Replicate src));
13464   predicate(n->as_Vector()->length() == 4 &&
13465             Matcher::vector_element_basic_type(n) == T_SHORT);
13466   expand %{
13467     moveReg(dst, src);
13468     repl48(dst);
13469     repl32(dst);
13470   %}
13471 %}
13472 
13473 instruct repl4S_immI0(iRegLdst dst, immI_0 zero) %{
13474   match(Set dst (Replicate zero));
13475   predicate(n->as_Vector()->length() == 4 &&
13476             Matcher::vector_element_basic_type(n) == T_SHORT);
13477   format %{ "LI      $dst, #0 \t// replicate4S" %}
13478   size(4);
13479   ins_encode %{
13480     __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
13481   %}
13482   ins_pipe(pipe_class_default);
13483 %}
13484 
13485 instruct repl4S_immIminus1(iRegLdst dst, immI_minus1 src) %{
13486   match(Set dst (Replicate src));
13487   predicate(n->as_Vector()->length() == 4 &&
13488             Matcher::vector_element_basic_type(n) == T_SHORT);
13489   format %{ "LI      $dst, -1 \t// replicate4S" %}
13490   size(4);
13491   ins_encode %{
13492     __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
13493   %}
13494   ins_pipe(pipe_class_default);
13495 %}
13496 
13497 instruct repl8S_reg_Ex(vecX dst, iRegIsrc src) %{
13498   match(Set dst (Replicate src));
13499   predicate(n->as_Vector()->length() == 8 &&
13500             Matcher::vector_element_basic_type(n) == T_SHORT);
13501 
13502   expand %{
13503     iRegLdst tmpL;
13504     vecX tmpV;
13505     immI8  zero %{ (int)  0 %}
13506     moveReg(tmpL, src);
13507     repl48(tmpL);
13508     repl32(tmpL);
13509     mtvsrd(tmpV, tmpL);
13510     xxpermdi(dst, tmpV, tmpV, zero);
13511   %}
13512 %}
13513 
13514 instruct repl8S_immI0(vecX dst, immI_0 zero) %{
13515   match(Set dst (Replicate zero));
13516   predicate(n->as_Vector()->length() == 8 &&
13517             Matcher::vector_element_basic_type(n) == T_SHORT);
13518 
13519   format %{ "XXLXOR      $dst, $zero \t// replicate8S" %}
13520   size(4);
13521   ins_encode %{
13522     __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
13523   %}
13524   ins_pipe(pipe_class_default);
13525 %}
13526 
13527 instruct repl8S_immIminus1(vecX dst, immI_minus1 src) %{
13528   match(Set dst (Replicate src));
13529   predicate(n->as_Vector()->length() == 8 &&
13530             Matcher::vector_element_basic_type(n) == T_SHORT);
13531 
13532   format %{ "XXLEQV      $dst, $src \t// replicate8S" %}
13533   size(4);
13534   ins_encode %{
13535     __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
13536   %}
13537   ins_pipe(pipe_class_default);
13538 %}
13539 
13540 instruct repl2I_reg_Ex(iRegLdst dst, iRegIsrc src) %{
13541   match(Set dst (Replicate src));
13542   predicate(n->as_Vector()->length() == 2 &&
13543             Matcher::vector_element_basic_type(n) == T_INT);
13544   ins_cost(2 * DEFAULT_COST);
13545   expand %{
13546     moveReg(dst, src);
13547     repl32(dst);
13548   %}
13549 %}
13550 
13551 instruct repl2I_immI0(iRegLdst dst, immI_0 zero) %{
13552   match(Set dst (Replicate zero));
13553   predicate(n->as_Vector()->length() == 2 &&
13554             Matcher::vector_element_basic_type(n) == T_INT);
13555   format %{ "LI      $dst, #0 \t// replicate2I" %}
13556   size(4);
13557   ins_encode %{
13558     __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
13559   %}
13560   ins_pipe(pipe_class_default);
13561 %}
13562 
13563 instruct repl2I_immIminus1(iRegLdst dst, immI_minus1 src) %{
13564   match(Set dst (Replicate src));
13565   predicate(n->as_Vector()->length() == 2 &&
13566             Matcher::vector_element_basic_type(n) == T_INT);
13567   format %{ "LI      $dst, -1 \t// replicate2I" %}
13568   size(4);
13569   ins_encode %{
13570     __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
13571   %}
13572   ins_pipe(pipe_class_default);
13573 %}
13574 
13575 instruct repl4I_reg_Ex(vecX dst, iRegIsrc src) %{
13576   match(Set dst (Replicate src));
13577   predicate(n->as_Vector()->length() == 4 &&
13578             Matcher::vector_element_basic_type(n) == T_INT);
13579   ins_cost(2 * DEFAULT_COST);
13580 
13581   expand %{
13582     iRegLdst tmpL;
13583     vecX tmpV;
13584     immI8  zero %{ (int)  0 %}
13585     moveReg(tmpL, src);
13586     repl32(tmpL);
13587     mtvsrd(tmpV, tmpL);
13588     xxpermdi(dst, tmpV, tmpV, zero);
13589   %}
13590 %}
13591 
13592 instruct repl4I_immI0(vecX dst, immI_0 zero) %{
13593   match(Set dst (Replicate zero));
13594   predicate(n->as_Vector()->length() == 4 &&
13595             Matcher::vector_element_basic_type(n) == T_INT);
13596 
13597   format %{ "XXLXOR      $dst, $zero \t// replicate4I" %}
13598   size(4);
13599   ins_encode %{
13600     __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
13601   %}
13602   ins_pipe(pipe_class_default);
13603 %}
13604 
13605 instruct repl4I_immIminus1(vecX dst, immI_minus1 src) %{
13606   match(Set dst (Replicate src));
13607   predicate(n->as_Vector()->length() == 4 &&
13608             Matcher::vector_element_basic_type(n) == T_INT);
13609 
13610   format %{ "XXLEQV      $dst, $dst, $dst \t// replicate4I" %}
13611   size(4);
13612   ins_encode %{
13613     __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
13614   %}
13615   ins_pipe(pipe_class_default);
13616 %}
13617 
13618 // Move float to int register via stack, replicate.
13619 instruct repl2F_reg_Ex(iRegLdst dst, regF src) %{
13620   match(Set dst (Replicate src));
13621   predicate(n->as_Vector()->length() == 2 &&
13622             Matcher::vector_element_basic_type(n) == T_FLOAT);
13623   ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
13624   expand %{
13625     stackSlotL tmpS;
13626     iRegIdst tmpI;
13627     moveF2I_reg_stack(tmpS, src);   // Move float to stack.
13628     moveF2I_stack_reg(tmpI, tmpS);  // Move stack to int reg.
13629     moveReg(dst, tmpI);             // Move int to long reg.
13630     repl32(dst);                    // Replicate bitpattern.
13631   %}
13632 %}
13633 
13634 // Replicate scalar constant to packed float values in Double register
13635 instruct repl2F_immF_Ex(iRegLdst dst, immF src) %{
13636   match(Set dst (Replicate src));
13637   predicate(n->as_Vector()->length() == 2 &&
13638             Matcher::vector_element_basic_type(n) == T_FLOAT);
13639   ins_cost(5 * DEFAULT_COST);
13640 
13641   format %{ "LD      $dst, offset, $constanttablebase\t// load replicated float $src $src from table, postalloc expanded" %}
13642   postalloc_expand( postalloc_expand_load_replF_constant(dst, src, constanttablebase) );
13643 %}
13644 
13645 // Replicate scalar zero constant to packed float values in Double register
13646 instruct repl2F_immF0(iRegLdst dst, immF_0 zero) %{
13647   match(Set dst (Replicate zero));
13648   predicate(n->as_Vector()->length() == 2 &&
13649             Matcher::vector_element_basic_type(n) == T_FLOAT);
13650 
13651   format %{ "LI      $dst, #0 \t// replicate2F" %}
13652   ins_encode %{
13653     __ li($dst$$Register, 0x0);
13654   %}
13655   ins_pipe(pipe_class_default);
13656 %}
13657 
13658 
13659 //----------Vector Arithmetic Instructions--------------------------------------
13660 
13661 // Vector Addition Instructions
13662 
13663 instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
13664   match(Set dst (AddVB src1 src2));
13665   predicate(n->as_Vector()->length() == 16);
13666   format %{ "VADDUBM  $dst,$src1,$src2\t// add packed16B" %}
13667   size(4);
13668   ins_encode %{
13669     __ vaddubm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13670   %}
13671   ins_pipe(pipe_class_default);
13672 %}
13673 
13674 instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{
13675   match(Set dst (AddVS src1 src2));
13676   predicate(n->as_Vector()->length() == 8);
13677   format %{ "VADDUHM  $dst,$src1,$src2\t// add packed8S" %}
13678   size(4);
13679   ins_encode %{
13680     __ vadduhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13681   %}
13682   ins_pipe(pipe_class_default);
13683 %}
13684 
13685 instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{
13686   match(Set dst (AddVI src1 src2));
13687   predicate(n->as_Vector()->length() == 4);
13688   format %{ "VADDUWM  $dst,$src1,$src2\t// add packed4I" %}
13689   size(4);
13690   ins_encode %{
13691     __ vadduwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13692   %}
13693   ins_pipe(pipe_class_default);
13694 %}
13695 
13696 instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{
13697   match(Set dst (AddVF src1 src2));
13698   predicate(n->as_Vector()->length() == 4);
13699   format %{ "VADDFP  $dst,$src1,$src2\t// add packed4F" %}
13700   size(4);
13701   ins_encode %{
13702     __ vaddfp($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13703   %}
13704   ins_pipe(pipe_class_default);
13705 %}
13706 
13707 instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
13708   match(Set dst (AddVL src1 src2));
13709   predicate(n->as_Vector()->length() == 2);
13710   format %{ "VADDUDM  $dst,$src1,$src2\t// add packed2L" %}
13711   size(4);
13712   ins_encode %{
13713     __ vaddudm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13714   %}
13715   ins_pipe(pipe_class_default);
13716 %}
13717 
13718 instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{
13719   match(Set dst (AddVD src1 src2));
13720   predicate(n->as_Vector()->length() == 2);
13721   format %{ "XVADDDP  $dst,$src1,$src2\t// add packed2D" %}
13722   size(4);
13723   ins_encode %{
13724     __ xvadddp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
13725   %}
13726   ins_pipe(pipe_class_default);
13727 %}
13728 
13729 // Vector Subtraction Instructions
13730 
13731 instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
13732   match(Set dst (SubVB src1 src2));
13733   predicate(n->as_Vector()->length() == 16);
13734   format %{ "VSUBUBM  $dst,$src1,$src2\t// sub packed16B" %}
13735   size(4);
13736   ins_encode %{
13737     __ vsububm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13738   %}
13739   ins_pipe(pipe_class_default);
13740 %}
13741 
13742 instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{
13743   match(Set dst (SubVS src1 src2));
13744   predicate(n->as_Vector()->length() == 8);
13745   format %{ "VSUBUHM  $dst,$src1,$src2\t// sub packed8S" %}
13746   size(4);
13747   ins_encode %{
13748     __ vsubuhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13749   %}
13750   ins_pipe(pipe_class_default);
13751 %}
13752 
13753 instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{
13754   match(Set dst (SubVI src1 src2));
13755   predicate(n->as_Vector()->length() == 4);
13756   format %{ "VSUBUWM  $dst,$src1,$src2\t// sub packed4I" %}
13757   size(4);
13758   ins_encode %{
13759     __ vsubuwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13760   %}
13761   ins_pipe(pipe_class_default);
13762 %}
13763 
13764 instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{
13765   match(Set dst (SubVF src1 src2));
13766   predicate(n->as_Vector()->length() == 4);
13767   format %{ "VSUBFP  $dst,$src1,$src2\t// sub packed4F" %}
13768   size(4);
13769   ins_encode %{
13770     __ vsubfp($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13771   %}
13772   ins_pipe(pipe_class_default);
13773 %}
13774 
13775 instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{
13776   match(Set dst (SubVL src1 src2));
13777   predicate(n->as_Vector()->length() == 2);
13778   format %{ "VSUBUDM  $dst,$src1,$src2\t// sub packed2L" %}
13779   size(4);
13780   ins_encode %{
13781     __ vsubudm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13782   %}
13783   ins_pipe(pipe_class_default);
13784 %}
13785 
13786 instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{
13787   match(Set dst (SubVD src1 src2));
13788   predicate(n->as_Vector()->length() == 2);
13789   format %{ "XVSUBDP  $dst,$src1,$src2\t// sub packed2D" %}
13790   size(4);
13791   ins_encode %{
13792     __ xvsubdp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
13793   %}
13794   ins_pipe(pipe_class_default);
13795 %}
13796 
13797 // Vector Multiplication Instructions
13798 
13799 instruct vmul8S_reg(vecX dst, vecX src1, vecX src2, vecX tmp) %{
13800   match(Set dst (MulVS src1 src2));
13801   predicate(n->as_Vector()->length() == 8);
13802   effect(TEMP tmp);
13803   format %{ "VSPLTISH  $tmp,0\t// mul packed8S\n\t"
13804             "VMLADDUHM  $dst,$src1,$src2,$tmp" %}
13805   size(8);
13806   ins_encode %{
13807     __ vspltish($tmp$$VectorSRegister->to_vr(), 0);
13808     __ vmladduhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr(), $tmp$$VectorSRegister->to_vr());
13809   %}
13810   ins_pipe(pipe_class_default);
13811 %}
13812 
13813 instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{
13814   match(Set dst (MulVI src1 src2));
13815   predicate(n->as_Vector()->length() == 4);
13816   format %{ "VMULUWM  $dst,$src1,$src2\t// mul packed4I" %}
13817   size(4);
13818   ins_encode %{
13819     __ vmuluwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
13820   %}
13821   ins_pipe(pipe_class_default);
13822 %}
13823 
13824 instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{
13825   match(Set dst (MulVF src1 src2));
13826   predicate(n->as_Vector()->length() == 4);
13827   format %{ "XVMULSP  $dst,$src1,$src2\t// mul packed4F" %}
13828   size(4);
13829   ins_encode %{
13830     __ xvmulsp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
13831   %}
13832   ins_pipe(pipe_class_default);
13833 %}
13834 
13835 instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{
13836   match(Set dst (MulVD src1 src2));
13837   predicate(n->as_Vector()->length() == 2);
13838   format %{ "XVMULDP  $dst,$src1,$src2\t// mul packed2D" %}
13839   size(4);
13840   ins_encode %{
13841     __ xvmuldp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
13842   %}
13843   ins_pipe(pipe_class_default);
13844 %}
13845 
13846 // Vector Division Instructions
13847 
13848 instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{
13849   match(Set dst (DivVF src1 src2));
13850   predicate(n->as_Vector()->length() == 4);
13851   format %{ "XVDIVSP  $dst,$src1,$src2\t// div packed4F" %}
13852   size(4);
13853   ins_encode %{
13854     __ xvdivsp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
13855   %}
13856   ins_pipe(pipe_class_default);
13857 %}
13858 
13859 instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{
13860   match(Set dst (DivVD src1 src2));
13861   predicate(n->as_Vector()->length() == 2);
13862   format %{ "XVDIVDP  $dst,$src1,$src2\t// div packed2D" %}
13863   size(4);
13864   ins_encode %{
13865     __ xvdivdp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
13866   %}
13867   ins_pipe(pipe_class_default);
13868 %}
13869 
13870 // Vector Absolute Instructions
13871 
13872 instruct vabs4F_reg(vecX dst, vecX src) %{
13873   match(Set dst (AbsVF src));
13874   predicate(n->as_Vector()->length() == 4);
13875   format %{ "XVABSSP $dst,$src\t// absolute packed4F" %}
13876   size(4);
13877   ins_encode %{
13878     __ xvabssp($dst$$VectorSRegister, $src$$VectorSRegister);
13879   %}
13880   ins_pipe(pipe_class_default);
13881 %}
13882 
13883 instruct vabs2D_reg(vecX dst, vecX src) %{
13884   match(Set dst (AbsVD src));
13885   predicate(n->as_Vector()->length() == 2);
13886   format %{ "XVABSDP $dst,$src\t// absolute packed2D" %}
13887   size(4);
13888   ins_encode %{
13889     __ xvabsdp($dst$$VectorSRegister, $src$$VectorSRegister);
13890   %}
13891   ins_pipe(pipe_class_default);
13892 %}
13893 
13894 // Round Instructions
13895 instruct roundD_reg(regD dst, regD src, immI8 rmode) %{
13896   match(Set dst (RoundDoubleMode src rmode));
13897   format %{ "RoundDoubleMode $src,$rmode" %}
13898   size(4);
13899   ins_encode %{
13900     switch ($rmode$$constant) {
13901       case RoundDoubleModeNode::rmode_rint:
13902         __ xvrdpic($dst$$FloatRegister->to_vsr(), $src$$FloatRegister->to_vsr());
13903         break;
13904       case RoundDoubleModeNode::rmode_floor:
13905         __ frim($dst$$FloatRegister, $src$$FloatRegister);
13906         break;
13907       case RoundDoubleModeNode::rmode_ceil:
13908         __ frip($dst$$FloatRegister, $src$$FloatRegister);
13909         break;
13910       default:
13911         ShouldNotReachHere();
13912     }
13913   %}
13914   ins_pipe(pipe_class_default);
13915 %}
13916 
13917 // Vector Round Instructions
13918 instruct vround2D_reg(vecX dst, vecX src, immI8 rmode) %{
13919   match(Set dst (RoundDoubleModeV src rmode));
13920   predicate(n->as_Vector()->length() == 2);
13921   format %{ "RoundDoubleModeV $src,$rmode" %}
13922   size(4);
13923   ins_encode %{
13924     switch ($rmode$$constant) {
13925       case RoundDoubleModeNode::rmode_rint:
13926         __ xvrdpic($dst$$VectorSRegister, $src$$VectorSRegister);
13927         break;
13928       case RoundDoubleModeNode::rmode_floor:
13929         __ xvrdpim($dst$$VectorSRegister, $src$$VectorSRegister);
13930         break;
13931       case RoundDoubleModeNode::rmode_ceil:
13932         __ xvrdpip($dst$$VectorSRegister, $src$$VectorSRegister);
13933         break;
13934       default:
13935         ShouldNotReachHere();
13936     }
13937   %}
13938   ins_pipe(pipe_class_default);
13939 %}
13940 
13941 // Vector Negate Instructions
13942 
13943 instruct vneg4F_reg(vecX dst, vecX src) %{
13944   match(Set dst (NegVF src));
13945   predicate(n->as_Vector()->length() == 4);
13946   format %{ "XVNEGSP $dst,$src\t// negate packed4F" %}
13947   size(4);
13948   ins_encode %{
13949     __ xvnegsp($dst$$VectorSRegister, $src$$VectorSRegister);
13950   %}
13951   ins_pipe(pipe_class_default);
13952 %}
13953 
13954 instruct vneg2D_reg(vecX dst, vecX src) %{
13955   match(Set dst (NegVD src));
13956   predicate(n->as_Vector()->length() == 2);
13957   format %{ "XVNEGDP $dst,$src\t// negate packed2D" %}
13958   size(4);
13959   ins_encode %{
13960     __ xvnegdp($dst$$VectorSRegister, $src$$VectorSRegister);
13961   %}
13962   ins_pipe(pipe_class_default);
13963 %}
13964 
13965 // Vector Square Root Instructions
13966 
13967 instruct vsqrt4F_reg(vecX dst, vecX src) %{
13968   match(Set dst (SqrtVF src));
13969   predicate(n->as_Vector()->length() == 4);
13970   format %{ "XVSQRTSP $dst,$src\t// sqrt packed4F" %}
13971   size(4);
13972   ins_encode %{
13973     __ xvsqrtsp($dst$$VectorSRegister, $src$$VectorSRegister);
13974   %}
13975   ins_pipe(pipe_class_default);
13976 %}
13977 
13978 instruct vsqrt2D_reg(vecX dst, vecX src) %{
13979   match(Set dst (SqrtVD src));
13980   predicate(n->as_Vector()->length() == 2);
13981   format %{ "XVSQRTDP  $dst,$src\t// sqrt packed2D" %}
13982   size(4);
13983   ins_encode %{
13984     __ xvsqrtdp($dst$$VectorSRegister, $src$$VectorSRegister);
13985   %}
13986   ins_pipe(pipe_class_default);
13987 %}
13988 
13989 // Vector Population Count Instructions
13990 
13991 instruct vpopcnt_reg(vecX dst, vecX src) %{
13992   match(Set dst (PopCountVI src));
13993   format %{ "VPOPCNT $dst,$src\t// pop count packed" %}
13994   size(4);
13995   ins_encode %{
13996     BasicType bt = Matcher::vector_element_basic_type(this);
13997     switch (bt) {
13998       case T_BYTE:
13999         __ vpopcntb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
14000         break;
14001       case T_SHORT:
14002         __ vpopcnth($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
14003         break;
14004       case T_INT:
14005         __ vpopcntw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
14006         break;
14007       case T_LONG:
14008         __ vpopcntd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
14009         break;
14010       default:
14011         ShouldNotReachHere();
14012     }
14013   %}
14014   ins_pipe(pipe_class_default);
14015 %}
14016 
14017 // --------------------------------- FMA --------------------------------------
14018 // src1 * src2 + dst
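      // The A-form VSX multiply-add instructions (xvmaddasp etc.) accumulate
      // into the target register, which is why $dst also appears as an input
      // operand in the match rules below.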
14019 instruct vfma4F(vecX dst, vecX src1, vecX src2) %{
14020   match(Set dst (FmaVF dst (Binary src1 src2)));
14021   predicate(n->as_Vector()->length() == 4);
14022 
14023   format %{ "XVMADDASP   $dst, $src1, $src2" %}
14024 
14025   size(4);
14026   ins_encode %{
14027     assert(UseFMA, "Needs FMA instructions support.");
14028     __ xvmaddasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
14029   %}
14030   ins_pipe(pipe_class_default);
14031 %}
14032 
14033 // src1 * (-src2) + dst
14034 // "(-src1) * src2 + dst" has been idealized to "src2 * (-src1) + dst"
14035 instruct vfma4F_neg1(vecX dst, vecX src1, vecX src2) %{
14036   match(Set dst (FmaVF dst (Binary src1 (NegVF src2))));
14037   predicate(n->as_Vector()->length() == 4);
14038 
14039   format %{ "XVNMSUBASP   $dst, $src1, $src2" %}
14040 
14041   size(4);
14042   ins_encode %{
14043     assert(UseFMA, "Needs FMA instructions support.");
14044     __ xvnmsubasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
14045   %}
14046   ins_pipe(pipe_class_default);
14047 %}
14048 
14049 // src1 * src2 - dst
14050 instruct vfma4F_neg2(vecX dst, vecX src1, vecX src2) %{
14051   match(Set dst (FmaVF (NegVF dst) (Binary src1 src2)));
14052   predicate(n->as_Vector()->length() == 4);
14053 
14054   format %{ "XVMSUBASP   $dst, $src1, $src2" %}
14055 
14056   size(4);
14057   ins_encode %{
14058     assert(UseFMA, "Needs FMA instructions support.");
14059     __ xvmsubasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
14060   %}
14061   ins_pipe(pipe_class_default);
14062 %}
14063 
14064 // src1 * src2 + dst
14065 instruct vfma2D(vecX dst, vecX src1, vecX src2) %{
14066   match(Set dst (FmaVD  dst (Binary src1 src2)));
14067   predicate(n->as_Vector()->length() == 2);
14068 
14069   format %{ "XVMADDADP   $dst, $src1, $src2" %}
14070 
14071   size(4);
14072   ins_encode %{
14073     assert(UseFMA, "Needs FMA instructions support.");
14074     __ xvmaddadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
14075   %}
14076   ins_pipe(pipe_class_default);
14077 %}
14078 
14079 // src1 * (-src2) + dst
14080 // "(-src1) * src2 + dst" has been idealized to "src2 * (-src1) + dst"
14081 instruct vfma2D_neg1(vecX dst, vecX src1, vecX src2) %{
14082   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
14083   predicate(n->as_Vector()->length() == 2);
14084 
14085   format %{ "XVNMSUBADP   $dst, $src1, $src2" %}
14086 
14087   size(4);
14088   ins_encode %{
14089     assert(UseFMA, "Needs FMA instructions support.");
14090     __ xvnmsubadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
14091   %}
14092   ins_pipe(pipe_class_default);
14093 %}
14094 
14095 // src1 * src2 - dst
14096 instruct vfma2D_neg2(vecX dst, vecX src1, vecX src2) %{
14097   match(Set dst (FmaVD (NegVD dst) (Binary src1 src2)));
14098   predicate(n->as_Vector()->length() == 2);
14099 
14100   format %{ "XVMSUBADP   $dst, $src1, $src2" %}
14101 
14102   size(4);
14103   ins_encode %{
14104     assert(UseFMA, "Needs FMA instructions support.");
14105     __ xvmsubadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
14106   %}
14107   ins_pipe(pipe_class_default);
14108 %}
14109 
14110 //----------Overflow Math Instructions-----------------------------------------
14111 
14112 // Note that we have to make sure that XER.SO is reset before using overflow instructions.
14113 // Simple overflow operations can be matched by very few instructions (e.g. addExact: xor, and_, bc).
14114 // It seems like only the Long intrinsics have an advantage. (The only expensive one is OverflowMulL.)
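      // Each encoding below therefore zeroes XER via li/mtxer before the
      // "o_" form instruction, so that CR0.SO reflects only this operation
      // (XER.SO is sticky otherwise).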
14115 
14116 instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
14117   match(Set cr0 (OverflowAddL op1 op2));
14118 
14119   format %{ "add_    $op1, $op2\t# overflow check long" %}
14120   ins_encode %{
14121     __ li(R0, 0);
14122     __ mtxer(R0); // clear XER.SO
14123     __ addo_(R0, $op1$$Register, $op2$$Register);
14124   %}
14125   ins_pipe(pipe_class_default);
14126 %}
14127 
14128 instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
14129   match(Set cr0 (OverflowSubL op1 op2));
14130 
14131   format %{ "subfo_  R0, $op2, $op1\t# overflow check long" %}
14132   ins_encode %{
14133     __ li(R0, 0);
14134     __ mtxer(R0); // clear XER.SO
14135     __ subfo_(R0, $op2$$Register, $op1$$Register);
14136   %}
14137   ins_pipe(pipe_class_default);
14138 %}
14139 
14140 instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{
14141   match(Set cr0 (OverflowSubL zero op2));
14142 
14143   format %{ "nego_   R0, $op2\t# overflow check long" %}
14144   ins_encode %{
14145     __ li(R0, 0);
14146     __ mtxer(R0); // clear XER.SO
14147     __ nego_(R0, $op2$$Register);
14148   %}
14149   ins_pipe(pipe_class_default);
14150 %}
14151 
14152 instruct overflowMulL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
14153   match(Set cr0 (OverflowMulL op1 op2));
14154 
14155   format %{ "mulldo_ R0, $op1, $op2\t# overflow check long" %}
14156   ins_encode %{
14157     __ li(R0, 0);
14158     __ mtxer(R0); // clear XER.SO
14159     __ mulldo_(R0, $op1$$Register, $op2$$Register);
14160   %}
14161   ins_pipe(pipe_class_default);
14162 %}
14163 
14164 instruct repl4F_reg_Ex(vecX dst, regF src) %{
14165   match(Set dst (Replicate src));
14166   predicate(n->as_Vector()->length() == 4 &&
14167             Matcher::vector_element_basic_type(n) == T_FLOAT);
14168   ins_cost(DEFAULT_COST);
14169   expand %{
14170     vecX tmpV;
14171     immI8  zero %{ (int)  0 %}
14172 
14173     xscvdpspn_regF(tmpV, src);
14174     xxspltw(dst, tmpV, zero);
14175   %}
14176 %}
14177 
14178 instruct repl4F_immF_Ex(vecX dst, immF src, iRegLdst tmp) %{
14179   match(Set dst (Replicate src));
14180   predicate(n->as_Vector()->length() == 4 &&
14181             Matcher::vector_element_basic_type(n) == T_FLOAT);
14182   effect(TEMP tmp);
14183   ins_cost(10 * DEFAULT_COST);
14184 
14185   postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase, tmp) );
14186 %}
14187 
14188 instruct repl4F_immF0(vecX dst, immF_0 zero) %{
14189   match(Set dst (Replicate zero));
14190   predicate(n->as_Vector()->length() == 4 &&
14191             Matcher::vector_element_basic_type(n) == T_FLOAT);
14192 
14193   format %{ "XXLXOR      $dst, $zero \t// replicate4F" %}
14194   ins_encode %{
14195     __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
14196   %}
14197   ins_pipe(pipe_class_default);
14198 %}
14199 
14200 instruct repl2D_reg_Ex(vecX dst, regD src) %{
14201   match(Set dst (Replicate src));
14202   predicate(n->as_Vector()->length() == 2 &&
14203             Matcher::vector_element_basic_type(n) == T_DOUBLE);
14204 
14205   format %{ "XXPERMDI      $dst, $src, $src, 0 \t// Splat doubleword" %}
14206   size(4);
14207   ins_encode %{
14208     __ xxpermdi($dst$$VectorSRegister, $src$$FloatRegister->to_vsr(), $src$$FloatRegister->to_vsr(), 0);
14209   %}
14210   ins_pipe(pipe_class_default);
14211 %}
14212 
14213 instruct repl2D_immD0(vecX dst, immD_0 zero) %{
14214   match(Set dst (Replicate zero));
14215   predicate(n->as_Vector()->length() == 2 &&
14216             Matcher::vector_element_basic_type(n) == T_DOUBLE);
14217 
14218   format %{ "XXLXOR      $dst, $zero \t// replicate2D" %}
14219   size(4);
14220   ins_encode %{
14221     __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
14222   %}
14223   ins_pipe(pipe_class_default);
14224 %}
14225 
14226 instruct mtvsrd(vecX dst, iRegLsrc src) %{
14227   predicate(false);
14228   effect(DEF dst, USE src);
14229 
14230   format %{ "MTVSRD      $dst, $src \t// Move to 16-byte register" %}
14231   size(4);
14232   ins_encode %{
14233     __ mtvsrd($dst$$VectorSRegister, $src$$Register);
14234   %}
14235   ins_pipe(pipe_class_default);
14236 %}
14237 
14238 instruct xxspltd(vecX dst, vecX src, immI8 zero) %{
14239   effect(DEF dst, USE src, USE zero);
14240 
14241   format %{ "XXSPLTD      $dst, $src, $zero \t// Splat doubleword" %}
14242   size(4);
14243   ins_encode %{
14244     __ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant);
14245   %}
14246   ins_pipe(pipe_class_default);
14247 %}
14248 
14249 instruct xxpermdi(vecX dst, vecX src1, vecX src2, immI8 zero) %{
14250   effect(DEF dst, USE src1, USE src2, USE zero);
14251 
14252   format %{ "XXPERMDI      $dst, $src1, $src2, $zero \t// Splat doubleword" %}
14253   size(4);
14254   ins_encode %{
14255     __ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant);
14256   %}
14257   ins_pipe(pipe_class_default);
14258 %}
14259 
14260 instruct repl2L_reg_Ex(vecX dst, iRegLsrc src) %{
14261   match(Set dst (Replicate src));
14262   predicate(n->as_Vector()->length() == 2 &&
14263             Matcher::vector_element_basic_type(n) == T_LONG);
14264   expand %{
14265     vecX tmpV;
14266     immI8  zero %{ (int)  0 %}
14267     mtvsrd(tmpV, src);
14268     xxpermdi(dst, tmpV, tmpV, zero);
14269   %}
14270 %}
14271 
14272 instruct repl2L_immI0(vecX dst, immI_0 zero) %{
14273   match(Set dst (Replicate zero));
14274   predicate(n->as_Vector()->length() == 2 &&
14275             Matcher::vector_element_basic_type(n) == T_LONG);
14276 
14277   format %{ "XXLXOR      $dst, $zero \t// replicate2L" %}
14278   size(4);
14279   ins_encode %{
14280     __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
14281   %}
14282   ins_pipe(pipe_class_default);
14283 %}
14284 
14285 instruct repl2L_immIminus1(vecX dst, immI_minus1 src) %{
14286   match(Set dst (Replicate src));
14287   predicate(n->as_Vector()->length() == 2 &&
14288             Matcher::vector_element_basic_type(n) == T_LONG);
14289 
14290   format %{ "XXLEQV      $dst, $src \t// replicate2L" %}
14291   size(4);
14292   ins_encode %{
14293     __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
14294   %}
14295   ins_pipe(pipe_class_default);
14296 %}
14297 
14298 // ============================================================================
14299 // Safepoint Instruction
14300 
14301 instruct safePoint_poll(iRegPdst poll) %{
14302   match(SafePoint poll);
14303 
14304   // Adding the effect that R0 is killed caused problems, but the effect
14305   // no longer needs to be mentioned, since R0 is not contained
14306   // in a reg_class.
14307 
14308   format %{ "LD      R0, #0, $poll \t// Safepoint poll for GC" %}
14309   size(4);
14310   ins_encode( enc_poll(0x0, poll) );
14311   ins_pipe(pipe_class_default);
14312 %}
14313 
14314 // ============================================================================
14315 // Call Instructions
14316 
14317 // Call Java Static Instruction
14318 
14319 source %{
14320 
14321 #include "runtime/continuation.hpp"
14322 
14323 %}
14324 
14325 // Schedulable version of call static node.
14326 instruct CallStaticJavaDirect(method meth) %{
14327   match(CallStaticJava);
14328   effect(USE meth);
14329   ins_cost(CALL_COST);
14330 
14331   ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);
14332 
14333   format %{ "CALL,static $meth \t// ==> " %}
14334   size((Continuations::enabled() ? 8 : 4));
14335   ins_encode( enc_java_static_call(meth) );
14336   ins_pipe(pipe_class_call);
14337 %}
14338 
14339 // Call Java Dynamic Instruction
14340 
14341 // Used by postalloc expand of CallDynamicJavaDirectSchedEx (actual call).
14342 // Loading of IC was postalloc expanded. The nodes loading the IC are reachable
14343 // via fields ins_field_load_ic_hi_node and ins_field_load_ic_node.
14344 // The call destination must still be placed in the constant pool.
14345 instruct CallDynamicJavaDirectSched(method meth) %{
14346   match(CallDynamicJava); // To get all the data fields we need ...
14347   effect(USE meth);
14348   predicate(false);       // ... but never match.
14349 
14350   ins_field_load_ic_hi_node(loadConL_hiNode*);
14351   ins_field_load_ic_node(loadConLNode*);
14352   ins_num_consts(1 /* 1 patchable constant: call destination */);
14353 
14354   format %{ "BL        \t// dynamic $meth ==> " %}
14355   size((Continuations::enabled() ? 8 : 4));
14356   ins_encode( enc_java_dynamic_call_sched(meth) );
14357   ins_pipe(pipe_class_call);
14358 %}
14359 
14360 // Schedulable (i.e. postalloc expanded) version of call dynamic java.
14361 // We use postalloc expanded calls if we use inline caches
14362 // and do not update method data.
14363 //
14364 // This instruction has two constants: inline cache (IC) and call destination.
14365 // Loading the inline cache will be postalloc expanded, thus leaving a call with
14366 // one constant.
14367 instruct CallDynamicJavaDirectSched_Ex(method meth) %{
14368   match(CallDynamicJava);
14369   effect(USE meth);
14370   predicate(UseInlineCaches);
14371   ins_cost(CALL_COST);
14372 
14373   ins_num_consts(2 /* 2 patchable constants: inline cache, call destination. */);
14374 
14375   format %{ "CALL,dynamic $meth \t// postalloc expanded" %}
14376   postalloc_expand( postalloc_expand_java_dynamic_call_sched(meth, constanttablebase) );
14377 %}
14378 
14379 // Compound version of call dynamic java
14380 // We use postalloc expanded calls if we use inline caches
14381 // and do not update method data.
14382 instruct CallDynamicJavaDirect(method meth) %{
14383   match(CallDynamicJava);
14384   effect(USE meth);
14385   predicate(!UseInlineCaches);
14386   ins_cost(CALL_COST);
14387 
14388   // Enc_java_to_runtime_call needs up to 4 constants (method data oop).
14389   ins_num_consts(4);
14390 
14391   format %{ "CALL,dynamic $meth \t// ==> " %}
14392   ins_encode( enc_java_dynamic_call(meth, constanttablebase) );
14393   ins_pipe(pipe_class_call);
14394 %}
14395 
14396 // Call Runtime Instruction
14397 
14398 instruct CallRuntimeDirect(method meth) %{
14399   match(CallRuntime);
14400   effect(USE meth);
14401   ins_cost(CALL_COST);
14402 
14403   // Enc_java_to_runtime_call needs up to 3 constants: call target,
14404   // env for callee, C-toc.
14405   ins_num_consts(3);
14406 
14407   format %{ "CALL,runtime" %}
14408   ins_encode( enc_java_to_runtime_call(meth) );
14409   ins_pipe(pipe_class_call);
14410 %}
14411 
14412 // Call Leaf
14413 
14414 // Used by postalloc expand of CallLeafDirect_Ex (mtctr).
14415 instruct CallLeafDirect_mtctr(iRegLdst dst, iRegLsrc src) %{
14416   effect(DEF dst, USE src);
14417 
14418   ins_num_consts(1);
14419 
14420   format %{ "MTCTR   $src" %}
14421   size(4);
14422   ins_encode( enc_leaf_call_mtctr(src) );
14423   ins_pipe(pipe_class_default);
14424 %}
14425 
14426 // Used by postalloc expand of CallLeafDirect_Ex (actual call).
14427 instruct CallLeafDirect(method meth) %{
14428   match(CallLeaf);   // To get all the data fields we need ...
14429   effect(USE meth);
14430   predicate(false);  // but never match.
14431 
14432   format %{ "BCTRL     \t// leaf call $meth ==> " %}
14433   size((Continuations::enabled() ? 8 : 4));
14434   ins_encode %{
14435     __ bctrl();
14436     __ post_call_nop();
14437   %}
14438   ins_pipe(pipe_class_call);
14439 %}
14440 
14441 // postalloc expand of CallLeafDirect.
14442 // Load address to call from TOC, then bl to it.
14443 instruct CallLeafDirect_Ex(method meth) %{
14444   match(CallLeaf);
14445   effect(USE meth);
14446   ins_cost(CALL_COST);
14447 
14448   // Postalloc_expand_java_to_runtime_call needs up to 3 constants: call target,
14449   // env for callee, C-toc.
14450   ins_num_consts(3);
14451 
14452   format %{ "CALL,runtime leaf $meth \t// postalloc expanded" %}
14453   postalloc_expand( postalloc_expand_java_to_runtime_call(meth, constanttablebase) );
14454 %}
14455 
14456 // Call runtime without safepoint - same as CallLeaf.
14457 // postalloc expand of CallLeafNoFPDirect.
14458 // Load address to call from TOC, then bl to it.
14459 instruct CallLeafNoFPDirect_Ex(method meth) %{
14460   match(CallLeafNoFP);
14461   effect(USE meth);
14462   ins_cost(CALL_COST);
14463 
14464   // Enc_java_to_runtime_call needs up to 3 constants: call target,
14465   // env for callee, C-toc.
14466   ins_num_consts(3);
14467 
14468   format %{ "CALL,runtime leaf nofp $meth \t// postalloc expanded" %}
14469   postalloc_expand( postalloc_expand_java_to_runtime_call(meth, constanttablebase) );
14470 %}
14471 
14472 // Tail Call; Jump from runtime stub to Java code.
14473 // Also known as an 'interprocedural jump'.
14474 // Target of jump will eventually return to caller.
14475 // TailJump below removes the return address.
14476 instruct TailCalljmpInd(iRegPdstNoScratch jump_target, inline_cache_regP method_ptr) %{
14477   match(TailCall jump_target method_ptr);
14478   ins_cost(CALL_COST);
14479 
14480   format %{ "MTCTR   $jump_target \t// $method_ptr holds method\n\t"
14481             "BCTR         \t// tail call" %}
14482   size(8);
14483   ins_encode %{
14484     __ mtctr($jump_target$$Register);
14485     __ bctr();
14486   %}
14487   ins_pipe(pipe_class_call);
14488 %}
14489 
14490 // Return Instruction
14491 instruct Ret() %{
14492   match(Return);
14493   format %{ "BLR      \t// branch to link register" %}
14494   size(4);
14495   ins_encode %{
14496     // LR is restored in MachEpilogNode. Just do the RET here.
14497     __ blr();
14498   %}
14499   ins_pipe(pipe_class_default);
14500 %}
14501 
14502 // Tail Jump; remove the return address; jump to target.
14503 // TailCall above leaves the return address around.
14504 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
14505 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
14506 // "restore" before this instruction (in Epilogue), we need to materialize it
14507 // in %i0.
14508 instruct tailjmpInd(iRegPdstNoScratch jump_target, rarg1RegP ex_oop) %{
14509   match(TailJump jump_target ex_oop);
14510   ins_cost(CALL_COST);
14511 
14512   format %{ "LD      R4_ARG2 = LR\n\t"
14513             "MTCTR   $jump_target\n\t"
14514             "BCTR     \t// TailJump, exception oop: $ex_oop" %}
14515   size(12);
14516   ins_encode %{
14517     __ ld(R4_ARG2/* issuing pc */, _abi0(lr), R1_SP);
14518     __ mtctr($jump_target$$Register);
14519     __ bctr();
14520   %}
14521   ins_pipe(pipe_class_call);
14522 %}
14523 
14524 // Forward exception.
14525 instruct ForwardExceptionjmp()
14526 %{
14527   match(ForwardException);
14528   ins_cost(CALL_COST);
14529 
14530   format %{ "Jmp     forward_exception_stub" %}
14531   ins_encode %{
14532     __ set_inst_mark();
14533     __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
14534     __ clear_inst_mark();
14535   %}
14536   ins_pipe(pipe_class_call);
14537 %}
14538 
14539 // Create exception oop: created by stack-crawling runtime code.
14540 // Created exception is now available to this handler, and is setup
14541 // just prior to jumping to this handler. No code emitted.
14542 instruct CreateException(rarg1RegP ex_oop) %{
14543   match(Set ex_oop (CreateEx));
14544   ins_cost(0);
14545 
14546   format %{ " -- \t// exception oop; no code emitted" %}
14547   size(0);
14548   ins_encode( /*empty*/ );
14549   ins_pipe(pipe_class_default);
14550 %}
14551 
14552 // Rethrow exception: The exception oop will come in the first
14553 // argument position. Then JUMP (not call) to the rethrow stub code.
14554 instruct RethrowException() %{
14555   match(Rethrow);
14556   ins_cost(CALL_COST);
14557 
14558   format %{ "Jmp     rethrow_stub" %}
14559   ins_encode %{
14560     __ set_inst_mark();
14561     __ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type);
14562     __ clear_inst_mark();
14563   %}
14564   ins_pipe(pipe_class_call);
14565 %}
14566 
14567 // Die now.
14568 instruct ShouldNotReachHere() %{
14569   match(Halt);
14570   ins_cost(CALL_COST);
14571 
14572   format %{ "ShouldNotReachHere" %}
14573   ins_encode %{
14574     if (is_reachable()) {
14575       __ stop(_halt_reason);
14576     }
14577   %}
14578   ins_pipe(pipe_class_default);
14579 %}
14580 
14581 // This name is KNOWN by the ADLC and cannot be changed.  The ADLC
// forces a 'TypeRawPtr::BOTTOM' output type for this node.
14583 // Get a DEF on threadRegP, no costs, no encoding, use
14584 // 'ins_should_rematerialize(true)' to avoid spilling.
14585 instruct tlsLoadP(threadRegP dst) %{
14586   match(Set dst (ThreadLocal));
14587   ins_cost(0);
14588 
14589   ins_should_rematerialize(true);
14590 
14591   format %{ " -- \t// $dst=Thread::current(), empty" %}
14592   size(0);
14593   ins_encode( /*empty*/ );
14594   ins_pipe(pipe_class_empty);
14595 %}
14596 
14597 //---Some PPC specific nodes---------------------------------------------------
14598 
14599 // Stop a group.
14600 instruct endGroup() %{
14601   ins_cost(0);
14602 
14603   ins_is_nop(true);
14604 
14605   format %{ "End Bundle (ori r1, r1, 0)" %}
14606   size(4);
14607   ins_encode %{
14608     __ endgroup();
14609   %}
14610   ins_pipe(pipe_class_default);
14611 %}
14612 
14613 // Nop instructions
14614 
14615 instruct fxNop() %{
14616   ins_cost(0);
14617 
14618   ins_is_nop(true);
14619 
14620   format %{ "fxNop" %}
14621   size(4);
14622   ins_encode %{
14623     __ nop();
14624   %}
14625   ins_pipe(pipe_class_default);
14626 %}
14627 
14628 instruct fpNop0() %{
14629   ins_cost(0);
14630 
14631   ins_is_nop(true);
14632 
14633   format %{ "fpNop0" %}
14634   size(4);
14635   ins_encode %{
14636     __ fpnop0();
14637   %}
14638   ins_pipe(pipe_class_default);
14639 %}
14640 
14641 instruct fpNop1() %{
14642   ins_cost(0);
14643 
14644   ins_is_nop(true);
14645 
14646   format %{ "fpNop1" %}
14647   size(4);
14648   ins_encode %{
14649     __ fpnop1();
14650   %}
14651   ins_pipe(pipe_class_default);
14652 %}
14653 
14654 instruct brNop0() %{
14655   ins_cost(0);
14656   size(4);
14657   format %{ "brNop0" %}
14658   ins_encode %{
14659     __ brnop0();
14660   %}
14661   ins_is_nop(true);
14662   ins_pipe(pipe_class_default);
14663 %}
14664 
14665 instruct brNop1() %{
14666   ins_cost(0);
14667 
14668   ins_is_nop(true);
14669 
14670   format %{ "brNop1" %}
14671   size(4);
14672   ins_encode %{
14673     __ brnop1();
14674   %}
14675   ins_pipe(pipe_class_default);
14676 %}
14677 
14678 instruct brNop2() %{
14679   ins_cost(0);
14680 
14681   ins_is_nop(true);
14682 
14683   format %{ "brNop2" %}
14684   size(4);
14685   ins_encode %{
14686     __ brnop2();
14687   %}
14688   ins_pipe(pipe_class_default);
14689 %}
14690 
14691 instruct cacheWB(indirect addr)
14692 %{
14693   match(CacheWB addr);
14694 
14695   ins_cost(100);
14696   format %{ "cache writeback, address = $addr" %}
14697   ins_encode %{
14698     assert($addr->index_position() < 0, "should be");
14699     assert($addr$$disp == 0, "should be");
14700     __ cache_wb(Address($addr$$base$$Register));
14701   %}
14702   ins_pipe(pipe_class_default);
14703 %}
14704 
14705 instruct cacheWBPreSync()
14706 %{
14707   match(CacheWBPreSync);
14708 
14709   ins_cost(0);
14710   format %{ "cache writeback presync" %}
14711   ins_encode %{
14712     __ cache_wbsync(true);
14713   %}
14714   ins_pipe(pipe_class_default);
14715 %}
14716 
14717 instruct cacheWBPostSync()
14718 %{
14719   match(CacheWBPostSync);
14720 
14721   ins_cost(100);
14722   format %{ "cache writeback postsync" %}
14723   ins_encode %{
14724     __ cache_wbsync(false);
14725   %}
14726   ins_pipe(pipe_class_default);
14727 %}
14728 
14729 //----------PEEPHOLE RULES-----------------------------------------------------
14730 // These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
14732 //
// peepmatch ( root_instr_name [preceding_instruction]* );
14734 //
// peepconstraint ( instruction_number.operand_name relational_op
//                  instruction_number.operand_name [, ...] );
14738 // // instruction numbers are zero-based using left to right order in peepmatch
14739 //
14740 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14741 // // provide an instruction_number.operand_name for each operand that appears
14742 // // in the replacement instruction's match rule
14743 //
14744 // ---------VM FLAGS---------------------------------------------------------
14745 //
14746 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14747 //
14748 // Each peephole rule is given an identifying number starting with zero and
14749 // increasing by one in the order seen by the parser. An individual peephole
14750 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14751 // on the command-line.
14752 //
14753 // ---------CURRENT LIMITATIONS----------------------------------------------
14754 //
14755 // Only match adjacent instructions in same basic block
14756 // Only equality constraints
14757 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14758 // Only one replacement instruction
14759 //
14760 // ---------EXAMPLE----------------------------------------------------------
14761 //
// // pertinent parts of example instruction definitions (x86-style names, for illustration)
14763 // instruct movI(eRegI dst, eRegI src) %{
14764 //   match(Set dst (CopyI src));
14765 // %}
14766 //
14767 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14768 //   match(Set dst (AddI dst src));
14769 //   effect(KILL cr);
14770 // %}
14771 //
14772 // // Change (inc mov) to lea
14773 // peephole %{
14774 //   // increment preceded by register-register move
14775 //   peepmatch ( incI_eReg movI );
14776 //   // require that the destination register of the increment
14777 //   // match the destination register of the move
14778 //   peepconstraint ( 0.dst == 1.dst );
14779 //   // construct a replacement instruction that sets
14780 //   // the destination to ( move's source register + one )
14781 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14782 // %}
14783 //
14784 // Implementation no longer uses movX instructions since
14785 // machine-independent system no longer uses CopyX nodes.
14786 //
14787 // peephole %{
14788 //   peepmatch ( incI_eReg movI );
14789 //   peepconstraint ( 0.dst == 1.dst );
14790 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14791 // %}
14792 //
14793 // peephole %{
14794 //   peepmatch ( decI_eReg movI );
14795 //   peepconstraint ( 0.dst == 1.dst );
14796 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14797 // %}
14798 //
14799 // peephole %{
14800 //   peepmatch ( addI_eReg_imm movI );
14801 //   peepconstraint ( 0.dst == 1.dst );
14802 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14803 // %}
14804 //
14805 // peephole %{
14806 //   peepmatch ( addP_eReg_imm movP );
14807 //   peepconstraint ( 0.dst == 1.dst );
14808 //   peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
14809 // %}
14810 
14811 // // Change load of spilled value to only a spill
14812 // instruct storeI(memory mem, eRegI src) %{
14813 //   match(Set mem (StoreI mem src));
14814 // %}
14815 //
14816 // instruct loadI(eRegI dst, memory mem) %{
14817 //   match(Set dst (LoadI mem));
14818 // %}
14819 //
14820 peephole %{
14821   peepmatch ( loadI storeI );
14822   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14823   peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14824 %}
14825 
14826 peephole %{
14827   peepmatch ( loadL storeL );
14828   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14829   peepreplace ( storeL( 1.mem 1.mem 1.src ) );
14830 %}
14831 
14832 peephole %{
14833   peepmatch ( loadP storeP );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeP( 1.mem 1.mem 1.src ) );
14836 %}
14837 
14838 //----------SMARTSPILL RULES---------------------------------------------------
14839 // These must follow all instruction definitions as they use the names
// defined in the instruction definitions.