
src/hotspot/cpu/x86/gc/z/z_x86_64.ad

   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 
  24 source_hpp %{
  25 
  26 #include "gc/z/c2/zBarrierSetC2.hpp"
  27 
  28 %}
  29 
  30 source %{
  31 
  32 #include "gc/z/zBarrierSetAssembler.hpp"
  33 
  34 static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
  35   assert(dst != rsp, "Invalid register");
  36   assert(dst != r15, "Invalid register");
  37 
  38   const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
  39                             : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
  40   __ lea(dst, src);
  41   __ call(RuntimeAddress(stub));
  42 }
  43 
  44 %}
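
The helper above only materializes the field address in the destination register and then calls a stub that is generated per destination register (rsp is excluded because it is the stack pointer, r15 because HotSpot reserves it for the current thread). The healing itself happens on the runtime side. As a rough mental model only, not HotSpot code, the slow path behaves like the sketch below; the zoop typedef, relocate_or_remap() and z_load_barrier_slow_path() are hypothetical stand-ins.

// Sketch only: conceptual effect of a ZGC load-barrier slow path.
typedef void* zoop;

// Hypothetical placeholder for the runtime's remap/relocate step.
static zoop relocate_or_remap(zoop o) {
  return o;  // the real runtime returns a good-colored, possibly moved, oop
}

static zoop z_load_barrier_slow_path(zoop* field) {
  zoop o = *field;                     // re-read the possibly bad oop via the address in dst
  zoop healed = relocate_or_remap(o);  // heal it
  *field = healed;                     // self-heal the field so later loads take the fast path
  return healed;                       // handed back in the same register the stub was keyed on
}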
  45 
  46 // For XMM and YMM enabled processors
  47 instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
  48                                       rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
  49                                       rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
  50                                       rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
  51                                       rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
  52 
  53   match(Set dst (LoadBarrierSlowReg src));
  54   predicate((UseAVX <= 2) && !n->as_LoadBarrierSlowReg()->is_weak());
  55 
  56   effect(DEF dst, KILL cr,
  57          KILL x0, KILL x1, KILL x2, KILL x3,
  58          KILL x4, KILL x5, KILL x6, KILL x7,
  59          KILL x8, KILL x9, KILL x10, KILL x11,
  60          KILL x12, KILL x13, KILL x14, KILL x15);
  61 
  62   format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}
  63 
  64   ins_encode %{
  65     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  66   %}
  67 
  68   ins_pipe(pipe_slow);
  69 %}
  70 
  71 // For ZMM enabled processors
  72 instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
  73                                 rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
  74                                 rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
  75                                 rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
  76                                 rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
  77                                 rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
  78                                 rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
  79                                 rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
  80                                 rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
  81 
  82   match(Set dst (LoadBarrierSlowReg src));
  83   predicate((UseAVX == 3) && !n->as_LoadBarrierSlowReg()->is_weak());
  84 
  85   effect(DEF dst, KILL cr,
  86          KILL x0, KILL x1, KILL x2, KILL x3,
  87          KILL x4, KILL x5, KILL x6, KILL x7,
  88          KILL x8, KILL x9, KILL x10, KILL x11,
  89          KILL x12, KILL x13, KILL x14, KILL x15,
  90          KILL x16, KILL x17, KILL x18, KILL x19,
  91          KILL x20, KILL x21, KILL x22, KILL x23,
  92          KILL x24, KILL x25, KILL x26, KILL x27,
  93          KILL x28, KILL x29, KILL x30, KILL x31);
  94 
  95   format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}
  96 
  97   ins_encode %{
  98     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  99   %}
 100 
 101   ins_pipe(pipe_slow);
 102 %}
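
The KILL lists above declare that the slow path may clobber every vector register, since the stub ends in a runtime call that is not assumed to preserve SIMD state; the strong and weak load-barrier instructs in this file therefore differ only in which stub they call and in how many vector registers the CPU exposes. A minimal sketch of the partitioning the predicates encode (illustrative only; BarrierVariant and select_variant are hypothetical names):

// Sketch only: mirrors the predicates on the four load-barrier instructs.
struct BarrierVariant {
  int  killed_vector_regs;  // 16 for XMM/YMM (UseAVX <= 2), 32 for ZMM (UseAVX == 3)
  bool weak;                // weak barriers call load_barrier_weak_slow_stub()
};

static BarrierVariant select_variant(int use_avx, bool is_weak) {
  BarrierVariant v;
  v.killed_vector_regs = (use_avx == 3) ? 32 : 16;
  v.weak = is_weak;
  return v;
}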
 103 
 104 // For XMM and YMM enabled processors
 105 instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
 106                                           rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
 107                                           rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
 108                                           rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
 109                                           rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 110 
 111   match(Set dst (LoadBarrierSlowReg src));
 112   predicate((UseAVX <= 2) && n->as_LoadBarrierSlowReg()->is_weak());
 113 
 114   effect(DEF dst, KILL cr,
 115          KILL x0, KILL x1, KILL x2, KILL x3,
 116          KILL x4, KILL x5, KILL x6, KILL x7,
 117          KILL x8, KILL x9, KILL x10, KILL x11,
 118          KILL x12, KILL x13, KILL x14, KILL x15);
 119 
 120   format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}
 121 
 122   ins_encode %{
 123     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
 124   %}
 125 
 126   ins_pipe(pipe_slow);
 127 %}
 128 
 129 // For ZMM enabled processors
 130 instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
 131                                     rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
 132                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
 133                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
 134                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
 135                                     rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
 136                                     rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
 137                                     rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
 138                                     rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 139 
 140   match(Set dst (LoadBarrierSlowReg src));
 141   predicate((UseAVX == 3) && n->as_LoadBarrierSlowReg()->is_weak());
 142 
 143   effect(DEF dst, KILL cr,
 144          KILL x0, KILL x1, KILL x2, KILL x3,
 145          KILL x4, KILL x5, KILL x6, KILL x7,
 146          KILL x8, KILL x9, KILL x10, KILL x11,
 147          KILL x12, KILL x13, KILL x14, KILL x15,
 148          KILL x16, KILL x17, KILL x18, KILL x19,
 149          KILL x20, KILL x21, KILL x22, KILL x23,
 150          KILL x24, KILL x25, KILL x26, KILL x27,
 151          KILL x28, KILL x29, KILL x30, KILL x31);
 152 
 153   format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}
 154 
 155   ins_encode %{
 156     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
 157   %}
 158 
 159   ins_pipe(pipe_slow);
 160 %}
 161 
  162 // Specialized versions of compareAndExchangeP that add a keepalive that is consumed
  163 // but does not affect the output.
 164 
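
For context, and as a sketch only rather than HotSpot code: the keepalive operand used below is an extra input that keeps another oop alive across the atomic operation after ZGC's barrier expansion; the node consumes it, but no instructions are emitted for it. z_compare_and_swap below is a hypothetical illustration of that contract.

#include <atomic>

// Sketch only: the keepalive parameter is never read and produces no code;
// it exists solely to extend a live range in the compiler's view.
static bool z_compare_and_swap(std::atomic<void*>* field,
                               void* expected,
                               void* new_value,
                               void* keepalive) {
  (void)keepalive;  // consumed but unused, just like the keepalive edge on the node
  return field->compare_exchange_strong(expected, new_value);
}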
 165 instruct z_compareAndExchangeP(
 166         memory mem_ptr,
 167         rax_RegP oldval, rRegP newval, rRegP keepalive,
 168         rFlagsReg cr) %{
 169     predicate(VM_Version::supports_cx8());
 170     match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval)));
 171     effect(KILL cr);
 172 
 173     format %{ "cmpxchgq $mem_ptr,$newval\t# "
 174               "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
 175     opcode(0x0F, 0xB1);
 176     ins_encode(lock_prefix,
 177             REX_reg_mem_wide(newval, mem_ptr),
 178             OpcP, OpcS,
 179             reg_mem(newval, mem_ptr)  // lock cmpxchg
 180     );
 181     ins_pipe( pipe_cmpxchg );
 182 %}
 183 
 184 instruct z_compareAndSwapP(rRegI res,
 185                          memory mem_ptr,
 186                          rax_RegP oldval, rRegP newval, rRegP keepalive,
 187                          rFlagsReg cr) %{
 188   predicate(VM_Version::supports_cx8());
 189   match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
 190   match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
 191   effect(KILL cr, KILL oldval);
 192 
 193   format %{ "cmpxchgq $mem_ptr,$newval\t# "
 194             "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
 195             "sete    $res\n\t"
 196             "movzbl  $res, $res" %}
 197   opcode(0x0F, 0xB1);
 198   ins_encode(lock_prefix,
 199           REX_reg_mem_wide(newval, mem_ptr),
 200           OpcP, OpcS,
 201           reg_mem(newval, mem_ptr),
 202           REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
 203           REX_reg_breg(res, res), // movzbl
 204           Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
 205   ins_pipe( pipe_cmpxchg );
 206 %}
 207 
 208 instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{
 209   match(Set newval (ZGetAndSetP mem (Binary newval keepalive)));
 210   format %{ "XCHGQ  $newval,[$mem]" %}
 211   ins_encode %{
 212     __ xchgq($newval$$Register, $mem$$Address);
 213   %}
 214   ins_pipe( pipe_cmpxchg );
 215 %}


   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 
  24 source %{
  25 
  26 #include "gc/z/zBarrierSetAssembler.hpp"
  27 
  28 static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
  29   assert(dst != rsp, "Invalid register");
  30   assert(dst != r15, "Invalid register");
  31 
  32   const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
  33                             : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
  34   __ lea(dst, src);
  35   __ call(RuntimeAddress(stub));
  36 }
  37 
  38 %}
  39 
  40 // For XMM and YMM enabled processors
  41 instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
  42                                       rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
  43                                       rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
  44                                       rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
  45                                       rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
  46 
  47   match(Set dst (LoadBarrierSlowReg src));
  48   predicate(UseAVX <= 2);
  49 
  50   effect(DEF dst, KILL cr,
  51          KILL x0, KILL x1, KILL x2, KILL x3,
  52          KILL x4, KILL x5, KILL x6, KILL x7,
  53          KILL x8, KILL x9, KILL x10, KILL x11,
  54          KILL x12, KILL x13, KILL x14, KILL x15);
  55 
  56   format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}
  57 
  58   ins_encode %{
  59     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  60   %}
  61 
  62   ins_pipe(pipe_slow);
  63 %}
  64 
  65 // For ZMM enabled processors
  66 instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
  67                                 rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
  68                                 rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
  69                                 rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
  70                                 rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
  71                                 rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
  72                                 rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
  73                                 rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
  74                                 rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
  75 
  76   match(Set dst (LoadBarrierSlowReg src));
  77   predicate(UseAVX == 3);
  78 
  79   effect(DEF dst, KILL cr,
  80          KILL x0, KILL x1, KILL x2, KILL x3,
  81          KILL x4, KILL x5, KILL x6, KILL x7,
  82          KILL x8, KILL x9, KILL x10, KILL x11,
  83          KILL x12, KILL x13, KILL x14, KILL x15,
  84          KILL x16, KILL x17, KILL x18, KILL x19,
  85          KILL x20, KILL x21, KILL x22, KILL x23,
  86          KILL x24, KILL x25, KILL x26, KILL x27,
  87          KILL x28, KILL x29, KILL x30, KILL x31);
  88 
  89   format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}
  90 
  91   ins_encode %{
  92     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  93   %}
  94 
  95   ins_pipe(pipe_slow);
  96 %}
  97 
  98 // For XMM and YMM enabled processors
  99 instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
 100                                           rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
 101                                           rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
 102                                           rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
 103                                           rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 104 
 105   match(Set dst (LoadBarrierWeakSlowReg src));
 106   predicate(UseAVX <= 2);
 107 
 108   effect(DEF dst, KILL cr,
 109          KILL x0, KILL x1, KILL x2, KILL x3,
 110          KILL x4, KILL x5, KILL x6, KILL x7,
 111          KILL x8, KILL x9, KILL x10, KILL x11,
 112          KILL x12, KILL x13, KILL x14, KILL x15);
 113 
 114   format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}
 115 
 116   ins_encode %{
 117     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
 118   %}
 119 
 120   ins_pipe(pipe_slow);
 121 %}
 122 
 123 // For ZMM enabled processors
 124 instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
 125                                     rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
 126                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
 127                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
 128                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
 129                                     rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
 130                                     rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
 131                                     rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
 132                                     rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 133 
 134   match(Set dst (LoadBarrierWeakSlowReg src));
 135   predicate(UseAVX == 3);
 136 
 137   effect(DEF dst, KILL cr,
 138          KILL x0, KILL x1, KILL x2, KILL x3,
 139          KILL x4, KILL x5, KILL x6, KILL x7,
 140          KILL x8, KILL x9, KILL x10, KILL x11,
 141          KILL x12, KILL x13, KILL x14, KILL x15,
 142          KILL x16, KILL x17, KILL x18, KILL x19,
 143          KILL x20, KILL x21, KILL x22, KILL x23,
 144          KILL x24, KILL x25, KILL x26, KILL x27,
 145          KILL x28, KILL x29, KILL x30, KILL x31);
 146 
 147   format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}
 148 
 149   ins_encode %{
 150     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
 151   %}
 152 
 153   ins_pipe(pipe_slow);
 154 %}