src/hotspot/cpu/aarch64/aarch64.ad

@@ -1960,11 +1960,11 @@
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   st->print_cr("# MachUEPNode");
   if (UseCompressedClassPointers) {
     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
-    if (CompressedKlassPointers::shift() != 0) {
+    if (Universe::narrow_klass_shift() != 0) {
       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     }
   } else {
    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
   }

@@ -2181,27 +2181,27 @@
 // If false, final_graph_reshaping() forces the decode behind the Cmp
 // for a NullCheck. The matcher matches the Decode node into a register.
 // Implicit_null_check optimization moves the Decode along with the
 // memory operation back up before the NullCheck.
 bool Matcher::narrow_oop_use_complex_address() {
-  return CompressedOops::shift() == 0;
+  return Universe::narrow_oop_shift() == 0;
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
 // TODO
 // decide whether we need to set this to true
   return false;
 }
 
 bool Matcher::const_oop_prefer_decode() {
   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
-  return CompressedOops::base() == NULL;
+  return Universe::narrow_oop_base() == NULL;
 }
 
 bool Matcher::const_klass_prefer_decode() {
   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-  return CompressedKlassPointers::base() == NULL;
+  return Universe::narrow_klass_base() == NULL;
 }
 
 // Is it better to copy float constants, or load them directly from
 // memory?  Intel can load a float constant from a direct address,
 // requiring no extra registers.  Most RISCs will have to materialize

@@ -3930,24 +3930,22 @@
   interface(CONST_INTER);
 %}
 
 operand immL_bitmask()
 %{
-  predicate((n->get_long() != 0)
-            && ((n->get_long() & 0xc000000000000000l) == 0)
+  predicate(((n->get_long() & 0xc000000000000000l) == 0)
             && is_power_of_2(n->get_long() + 1));
   match(ConL);
 
   op_cost(0);
   format %{ %}
   interface(CONST_INTER);
 %}
 
 operand immI_bitmask()
 %{
-  predicate((n->get_int() != 0)
-            && ((n->get_int() & 0xc0000000) == 0)
+  predicate(((n->get_int() & 0xc0000000) == 0)
             && is_power_of_2(n->get_int() + 1));
   match(ConI);
 
   op_cost(0);
   format %{ %}
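
For context on the two bitmask operands above: the predicate accepts a constant m whose top two bits are clear and where m + 1 is a power of two, i.e. a contiguous low-order mask such as 0xff or 0x3fffffff; on the new side of the diff, where the (n->get_int() != 0) guard is dropped, m == 0 also qualifies. A minimal standalone sketch of that check (plain C++; the local is_power_of_2 is only a stand-in for HotSpot's helper):

#include <cstdint>
#include <cstdio>

// Local stand-in for HotSpot's is_power_of_2; only the shape of the check matters.
static bool is_power_of_2(uint64_t v) { return v != 0 && (v & (v - 1)) == 0; }

static bool accepts_immI_bitmask(int32_t m) {
  return ((m & 0xc0000000) == 0) && is_power_of_2((uint32_t)m + 1);
}

int main() {
  printf("%d %d %d %d\n",
         accepts_immI_bitmask(0x7f),        // 1: 7-bit low-order mask
         accepts_immI_bitmask(0x3fffffff),  // 1: widest accepted mask
         accepts_immI_bitmask(0),           // 1: accepted once the != 0 guard is gone
         accepts_immI_bitmask(0xfe));       // 0: not contiguous from bit 0
  return 0;
}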

@@ -5042,11 +5040,11 @@
   %}
 %}
 
 operand indirectN(iRegN reg)
 %{
-  predicate(CompressedOops::shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(DecodeN reg);
   op_cost(0);
   format %{ "[$reg]\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -5057,11 +5055,11 @@
   %}
 %}
 
 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
 %{
-  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
+  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
   op_cost(0);
   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -5072,11 +5070,11 @@
   %}
 %}
 
 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
 %{
-  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
+  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL lreg scale));
   op_cost(0);
   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -5087,11 +5085,11 @@
   %}
 %}
 
 operand indIndexI2LN(iRegN reg, iRegI ireg)
 %{
-  predicate(CompressedOops::shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (ConvI2L ireg));
   op_cost(0);
   format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -5102,11 +5100,11 @@
   %}
 %}
 
 operand indIndexN(iRegN reg, iRegL lreg)
 %{
-  predicate(CompressedOops::shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) lreg);
   op_cost(0);
   format %{ "$reg, $lreg\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -5117,11 +5115,11 @@
   %}
 %}
 
 operand indOffIN(iRegN reg, immIOffset off)
 %{
-  predicate(CompressedOops::shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
   op_cost(0);
   format %{ "[$reg, $off]\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -5132,11 +5130,11 @@
   %}
 %}
 
 operand indOffLN(iRegN reg, immLoffset off)
 %{
-  predicate(CompressedOops::shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
   op_cost(0);
   format %{ "[$reg, $off]\t# narrow" %}
   interface(MEMORY_INTER) %{

@@ -7037,12 +7035,12 @@
 %}
 
 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
 %{
   match(Set mem (StoreN mem zero));
-  predicate(CompressedOops::base() == NULL &&
-            CompressedKlassPointers::base() == NULL &&
+  predicate(Universe::narrow_oop_base() == NULL &&
+            Universe::narrow_klass_base() == NULL &&
             (!needs_releasing_store(n)));
 
   ins_cost(INSN_COST);
   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
 

@@ -7820,11 +7818,11 @@
 
 // Convert compressed oop into int for vectors alignment masking
 // in case of 32bit oops (heap < 4Gb).
 instruct convN2I(iRegINoSp dst, iRegN src)
 %{
-  predicate(CompressedOops::shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0);
   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 
   ins_cost(INSN_COST);
   format %{ "mov dst, $src\t# compressed ptr -> int" %}
   ins_encode %{

@@ -11341,15 +11339,18 @@
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
 %{
   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what sbfm can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 63
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
+
   ins_cost(INSN_COST * 2);
   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
   ins_encode %{
-    int lshift = $lshift_count$$constant & 63;
-    int rshift = $rshift_count$$constant & 63;
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
     int s = 63 - lshift;
     int r = (rshift - lshift) & 63;
     __ sbfm(as_Register($dst$$reg),
             as_Register($src$$reg),
             r, s);
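
A minimal sketch (plain C++, not HotSpot code) of the idiom this instruct matches and of the sbfm operand computation used in ins_encode above. The shift counts are assumed already in range, which is what the added predicate enforces now that the '& 63' masking is gone:

#include <cassert>
#include <cstdint>

// (x << l) >> r with an arithmetic right shift; the cast avoids signed
// left-shift overflow, and >> on int64_t is arithmetic on mainstream compilers.
static int64_t shift_idiom(int64_t x, int l, int r) {
  return (int64_t)((uint64_t)x << l) >> r;
}

int main() {
  int64_t x = 0x1234567890abcdefLL;
  // i2b: l == r == 56 keeps the low byte and sign-extends it.
  assert(shift_idiom(x, 56, 56) == (int64_t)(int8_t)x);
  // The matching sbfm operands, computed exactly as in ins_encode above.
  int l = 56, r = 56;
  int s = 63 - l;                 // imms = 7
  int rr = (r - l) & 63;          // immr = 0
  assert(s == 7 && rr == 0);      // sbfm dst, src, #0, #7 is sxtb
  return 0;
}

The sbfmwI, ubfmL and ubfmwI hunks below are the same change on the 32-bit and unsigned variants.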

@@ -11361,15 +11362,18 @@
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
 %{
   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what sbfmw can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 31
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
+
   ins_cost(INSN_COST * 2);
   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
   ins_encode %{
-    int lshift = $lshift_count$$constant & 31;
-    int rshift = $rshift_count$$constant & 31;
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
     int s = 31 - lshift;
     int r = (rshift - lshift) & 31;
     __ sbfmw(as_Register($dst$$reg),
             as_Register($src$$reg),
             r, s);

@@ -11381,15 +11385,18 @@
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
 %{
   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what ubfm can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 63
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
+
   ins_cost(INSN_COST * 2);
   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
   ins_encode %{
-    int lshift = $lshift_count$$constant & 63;
-    int rshift = $rshift_count$$constant & 63;
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
     int s = 63 - lshift;
     int r = (rshift - lshift) & 63;
     __ ubfm(as_Register($dst$$reg),
             as_Register($src$$reg),
             r, s);

@@ -11401,15 +11408,18 @@
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
 %{
   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what ubfmw can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 31
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
+
   ins_cost(INSN_COST * 2);
   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
   ins_encode %{
-    int lshift = $lshift_count$$constant & 31;
-    int rshift = $rshift_count$$constant & 31;
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
     int s = 31 - lshift;
     int r = (rshift - lshift) & 31;
     __ ubfmw(as_Register($dst$$reg),
             as_Register($src$$reg),
             r, s);

@@ -11420,36 +11430,32 @@
 // Bitfield extract with shift & mask
 
 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
   match(Set dst (AndI (URShiftI src rshift) mask));
-  // Make sure we are not going to exceed what ubfxw can do.
-  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
   ins_cost(INSN_COST);
   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant & 31;
+    int rshift = $rshift$$constant;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfxw(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}
   ins_pipe(ialu_reg_shift);
 %}
 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
 %{
   match(Set dst (AndL (URShiftL src rshift) mask));
-  // Make sure we are not going to exceed what ubfx can do.
-  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
 
   ins_cost(INSN_COST);
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant & 63;
+    int rshift = $rshift$$constant;
     long mask = $mask$$constant;
-    int width = exact_log2_long(mask+1);
+    int width = exact_log2(mask+1);
     __ ubfx(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}
   ins_pipe(ialu_reg_shift);
 %}
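
For reference, a minimal sketch (plain C++, not the assembler) of the form these two instructs match: with mask a contiguous low-order bitmask of width = exact_log2(mask + 1) bits, the and-of-shift is an unsigned bitfield extract of width bits starting at bit rshift, which is what ubfxw/ubfx compute:

#include <cassert>
#include <cstdint>

static uint32_t ubfxw_model(uint32_t src, int lsb, int width) {
  return (src >> lsb) & ((1u << width) - 1);   // what UBFXW dst, src, #lsb, #width yields
}

int main() {
  uint32_t src = 0xdeadbeef;
  int rshift = 8;
  uint32_t mask = 0xfff;                       // immI_bitmask: mask + 1 == 2^12
  int width = 12;                              // exact_log2(mask + 1)
  assert(((src >> rshift) & mask) == ubfxw_model(src, rshift, width));
  return 0;
}

The ubfxL variant is the same picture on 64 bits.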

@@ -11457,17 +11463,15 @@
 // We can use ubfx when extending an And with a mask when we know mask
 // is positive.  We know that because immI_bitmask guarantees it.
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
-  // Make sure we are not going to exceed what ubfxw can do.
-  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
   ins_cost(INSN_COST * 2);
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant & 31;
+    int rshift = $rshift$$constant;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfx(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}

@@ -11477,16 +11481,17 @@
 // We can use ubfiz when masking by a positive number and then left shifting the result.
 // We know that the mask is positive because immI_bitmask guarantees it.
 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 %{
   match(Set dst (LShiftI (AndI src mask) lshift));
-  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));
 
   ins_cost(INSN_COST);
   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
   ins_encode %{
-    int lshift = $lshift$$constant & 31;
+    int lshift = $lshift$$constant;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfizw(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
   %}

@@ -11495,34 +11500,36 @@
 // We can use ubfiz when masking by a positive number and then left shifting the result.
 // We know that the mask is positive because immL_bitmask guarantees it.
 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
 %{
   match(Set dst (LShiftL (AndL src mask) lshift));
-  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
+  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
+    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));
 
   ins_cost(INSN_COST);
   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
   ins_encode %{
-    int lshift = $lshift$$constant & 63;
+    int lshift = $lshift$$constant;
     long mask = $mask$$constant;
-    int width = exact_log2_long(mask+1);
+    int width = exact_log2(mask+1);
     __ ubfiz(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
   %}
   ins_pipe(ialu_reg_shift);
 %}
 
 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 %{
-  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
-  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
+  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
 
   ins_cost(INSN_COST);
   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
   ins_encode %{
-    int lshift = $lshift$$constant & 63;
+    int lshift = $lshift$$constant;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfiz(as_Register($dst$$reg),
              as_Register($src$$reg), lshift, width);
   %}
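
Likewise for ubfiz, a minimal sketch (plain C++) of the LShiftI (AndI src mask) lshift form and of the bound the predicates above keep: the masked field, once shifted left, must still fit in the register, hence lshift + width <= 32 for ubfizw (and <= 64 for ubfiz):

#include <cassert>
#include <cstdint>

static uint32_t ubfizw_model(uint32_t src, int lshift, int width) {
  return (src & ((1u << width) - 1)) << lshift;  // what UBFIZW dst, src, #lshift, #width yields
}

int main() {
  uint32_t src = 0x12345678;
  uint32_t mask = 0xff;                          // width = exact_log2(mask + 1) = 8
  int lshift = 16;                               // 16 + 8 <= 32, so the predicate holds
  assert(((src & mask) << lshift) == ubfizw_model(src, lshift, 8));
  return 0;
}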

@@ -11532,11 +11539,11 @@
 // Rotations
 
 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
-  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
 
   ins_cost(INSN_COST);
   format %{ "extr $dst, $src1, $src2, #$rshift" %}
 
   ins_encode %{

@@ -11547,11 +11554,11 @@
 %}
 
 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
-  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
 
   ins_cost(INSN_COST);
   format %{ "extr $dst, $src1, $src2, #$rshift" %}
 
   ins_encode %{

@@ -11562,11 +11569,11 @@
 %}
 
 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
-  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
 
   ins_cost(INSN_COST);
   format %{ "extr $dst, $src1, $src2, #$rshift" %}
 
   ins_encode %{

@@ -11577,11 +11584,11 @@
 %}
 
 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
-  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
 
   ins_cost(INSN_COST);
   format %{ "extr $dst, $src1, $src2, #$rshift" %}
 
   ins_encode %{
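
A minimal sketch (plain C++ model) of why the four extr forms above work: the predicate requires lshift + rshift to be a multiple of the register width, so the OR (or ADD) of the two shifted values is the register-sized window taken at bit rshift of the concatenation src1:src2, which is exactly what EXTR computes (and a rotate when src1 == src2):

#include <cassert>
#include <cstdint>

static uint64_t extr_model(uint64_t hi, uint64_t lo, int lsb) {
  // 64-bit slice of hi:lo starting at bit lsb (0 < lsb < 64 assumed here).
  return (lo >> lsb) | (hi << (64 - lsb));
}

int main() {
  uint64_t src1 = 0x0123456789abcdefULL, src2 = 0xfedcba9876543210ULL;
  int rshift = 24, lshift = 64 - rshift;   // predicate: (lshift + rshift) & 63 == 0
  uint64_t via_or = (src1 << lshift) | (src2 >> rshift);
  assert(via_or == extr_model(src1, src2, rshift));
  return 0;
}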

@@ -14044,67 +14051,59 @@
 %}
 
 // ============================================================================
 // Max and Min
 
-instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
+instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
 %{
-  effect( DEF dst, USE src1, USE src2, USE cr );
+  match(Set dst (MinI src1 src2));
 
-  ins_cost(INSN_COST * 2);
-  format %{ "cselw $dst, $src1, $src2 lt\t"  %}
+  effect(DEF dst, USE src1, USE src2, KILL cr);
+  size(8);
+
+  ins_cost(INSN_COST * 3);
+  format %{
+    "cmpw $src1 $src2\t signed int\n\t"
+    "cselw $dst, $src1, $src2 lt\t"
+  %}
 
   ins_encode %{
+    __ cmpw(as_Register($src1$$reg),
+            as_Register($src2$$reg));
     __ cselw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LT);
   %}
 
-  ins_pipe(icond_reg_reg);
-%}
-
-instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
-%{
-  match(Set dst (MinI src1 src2));
-  ins_cost(INSN_COST * 3);
-
-  expand %{
-    rFlagsReg cr;
-    compI_reg_reg(cr, src1, src2);
-    cmovI_reg_reg_lt(dst, src1, src2, cr);
-  %}
-
+  ins_pipe(ialu_reg_reg);
 %}
 // FROM HERE
 
-instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
+instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
 %{
-  effect( DEF dst, USE src1, USE src2, USE cr );
+  match(Set dst (MaxI src1 src2));
 
-  ins_cost(INSN_COST * 2);
-  format %{ "cselw $dst, $src1, $src2 gt\t"  %}
+  effect(DEF dst, USE src1, USE src2, KILL cr);
+  size(8);
+
+  ins_cost(INSN_COST * 3);
+  format %{
+    "cmpw $src1 $src2\t signed int\n\t"
+    "cselw $dst, $src1, $src2 gt\t"
+  %}
 
   ins_encode %{
+    __ cmpw(as_Register($src1$$reg),
+            as_Register($src2$$reg));
     __ cselw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::GT);
   %}
 
-  ins_pipe(icond_reg_reg);
-%}
-
-instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
-%{
-  match(Set dst (MaxI src1 src2));
-  ins_cost(INSN_COST * 3);
-  expand %{
-    rFlagsReg cr;
-    compI_reg_reg(cr, src1, src2);
-    cmovI_reg_reg_gt(dst, src1, src2, cr);
-  %}
+  ins_pipe(ialu_reg_reg);
 %}
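
A minimal sketch (plain C++, not the generated code) of the branchless MinI lowering that replaces the expand-based form above: cmpw sets the flags from the signed comparison of src1 and src2, and cselw with condition lt keeps src1 exactly when src1 < src2; MaxI is the same sequence with condition gt:

#include <cassert>
#include <cstdint>

static int32_t min_via_csel(int32_t src1, int32_t src2) {
  bool lt = src1 < src2;          // cmpw src1, src2 ; condition LT
  return lt ? src1 : src2;        // cselw dst, src1, src2, lt
}

int main() {
  assert(min_via_csel(-3, 7) == -3);
  assert(min_via_csel(7, -3) == -3);
  assert(min_via_csel(5, 5) == 5);  // LT is false, src2 is picked; same value either way
  return 0;
}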
 
 // ============================================================================
 // Branch Instructions
 