//
// Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// AMD64 Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// General Registers
// R8-R15 must be encoded with REX.  (RSP, RBP, RSI, RDI need REX when
// used as byte registers)

// Previously set RBX, RSI, and RDI as save-on-entry for java code
// Turn off SOE in java-code due to frequent use of uncommon-traps.
// Now that allocator is better, turn on RSI and RDI as SOE registers.
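// Reading an entry, using RBX below as an example: (SOC, SOE, Op_RegI, 3, ...)
// declares RBX as save-on-call for compiled Java code but save-on-entry under
// the C calling convention, spilled as an int (Op_RegI), with hardware
// encoding 3.  The matching _H definition names the upper half of the 64-bit
// register pair.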
69 70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg()); 71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next()); 72 73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg()); 74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next()); 75 76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg()); 77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next()); 78 79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg()); 80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next()); 81 82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg()); 83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next()); 84 85 // now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code 86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg()); 87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next()); 88 89 #ifdef _WIN64 90 91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg()); 92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next()); 93 94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg()); 95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next()); 96 97 #else 98 99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg()); 100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next()); 101 102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg()); 103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next()); 104 105 #endif 106 107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg()); 108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next()); 109 110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg()); 111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next()); 112 113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg()); 114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next()); 115 116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg()); 117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next()); 118 119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg()); 120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next()); 121 122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg()); 123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next()); 124 125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg()); 126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next()); 127 128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg()); 129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next()); 130 131 132 // Floating Point Registers 133 134 // Specify priority of register selection within phases of register 135 // allocation. Highest priority is first. A useful heuristic is to 136 // give registers a low priority when they are required by machine 137 // instructions, like EAX and EDX on I486, and choose no-save registers 138 // before save-on-call, & save-on-call before save-on-entry. Registers 139 // which participate in fixed calling sequences should come last. 140 // Registers which are used as pairs must fall on an even boundary. 141 142 alloc_class chunk0(R10, R10_H, 143 R11, R11_H, 144 R8, R8_H, 145 R9, R9_H, 146 R12, R12_H, 147 RCX, RCX_H, 148 RBX, RBX_H, 149 RDI, RDI_H, 150 RDX, RDX_H, 151 RSI, RSI_H, 152 RAX, RAX_H, 153 RBP, RBP_H, 154 R13, R13_H, 155 R14, R14_H, 156 R15, R15_H, 157 RSP, RSP_H); 158 159 160 //----------Architecture Description Register Classes-------------------------- 161 // Several register classes are automatically defined based upon information in 162 // this architecture description. 
// 1) reg_class inline_cache_reg     ( /* as def'd in frame section */ )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Empty register class.
reg_class no_reg();

// Class for all pointer/long registers
reg_class all_reg(RAX, RAX_H,
                  RDX, RDX_H,
                  RBP, RBP_H,
                  RDI, RDI_H,
                  RSI, RSI_H,
                  RCX, RCX_H,
                  RBX, RBX_H,
                  RSP, RSP_H,
                  R8,  R8_H,
                  R9,  R9_H,
                  R10, R10_H,
                  R11, R11_H,
                  R12, R12_H,
                  R13, R13_H,
                  R14, R14_H,
                  R15, R15_H);

// Class for all int registers
reg_class all_int_reg(RAX,
                      RDX,
                      RBP,
                      RDI,
                      RSI,
                      RCX,
                      RBX,
                      R8,
                      R9,
                      R10,
                      R11,
                      R12,
                      R13,
                      R14);

// Class for all pointer registers
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for all pointer registers (excluding RSP)
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all pointer registers (excluding RSP and RBP)
reg_class ptr_reg_no_rbp %{
  return _PTR_REG_NO_RBP_mask;
%}

// Class for all pointer registers (excluding RAX and RSP)
reg_class ptr_no_rax_reg %{
  return _PTR_NO_RAX_REG_mask;
%}

// Class for all pointer registers (excluding RAX, RBX, and RSP)
reg_class ptr_no_rax_rbx_reg %{
  return _PTR_NO_RAX_RBX_REG_mask;
%}

// Class for all long registers (excluding RSP)
reg_class long_reg %{
  return _LONG_REG_mask;
%}

// Class for all long registers (excluding RAX, RDX and RSP)
reg_class long_no_rax_rdx_reg %{
  return _LONG_NO_RAX_RDX_REG_mask;
%}

// Class for all long registers (excluding RCX and RSP)
reg_class long_no_rcx_reg %{
  return _LONG_NO_RCX_REG_mask;
%}

// Class for all int registers (excluding RSP)
reg_class int_reg %{
  return _INT_REG_mask;
%}

// Class for all int registers (excluding RAX, RDX, and RSP)
reg_class int_no_rax_rdx_reg %{
  return _INT_NO_RAX_RDX_REG_mask;
%}

// Class for all int registers (excluding RCX and RSP)
reg_class int_no_rcx_reg %{
  return _INT_NO_RCX_REG_mask;
%}

// Singleton class for RAX pointer register
reg_class ptr_rax_reg(RAX, RAX_H);

// Singleton class for RBX pointer register
reg_class ptr_rbx_reg(RBX, RBX_H);

// Singleton class for RSI pointer register
reg_class ptr_rsi_reg(RSI, RSI_H);

// Singleton class for RBP pointer register
reg_class ptr_rbp_reg(RBP, RBP_H);

// Singleton class for RDI pointer register
reg_class ptr_rdi_reg(RDI, RDI_H);

// Singleton class for stack pointer
reg_class ptr_rsp_reg(RSP, RSP_H);

// Singleton class for TLS pointer
reg_class ptr_r15_reg(R15, R15_H);

// Singleton class for RAX long register
reg_class long_rax_reg(RAX, RAX_H);

// Singleton class for RCX long register
reg_class long_rcx_reg(RCX, RCX_H);

// Singleton class for RDX long register
reg_class long_rdx_reg(RDX, RDX_H);

// Singleton class for RAX int register
reg_class int_rax_reg(RAX);

// Singleton class for RBX int register
reg_class int_rbx_reg(RBX);

// Singleton class for RCX int register
reg_class int_rcx_reg(RCX);

// Singleton class for RDX int register
reg_class int_rdx_reg(RDX);

// Singleton class for RDI int register
reg_class int_rdi_reg(RDI);

// Singleton class for instruction pointer
// reg_class ip_reg(RIP);

%}

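// The register classes above written as "reg_class name %{ return _MASK; %}"
// are not fixed by adlc; each returns a RegMask that reg_mask_init() in the
// source block below derives from _ALL_REG_mask / _ALL_INT_REG_mask, so RBP
// and R12 drop out when PreserveFramePointer or compressed oops are in use.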
309 //----------SOURCE BLOCK------------------------------------------------------- 310 // This is a block of C++ code which provides values, functions, and 311 // definitions necessary in the rest of the architecture description 312 source_hpp %{ 313 314 extern RegMask _ANY_REG_mask; 315 extern RegMask _PTR_REG_mask; 316 extern RegMask _PTR_REG_NO_RBP_mask; 317 extern RegMask _PTR_NO_RAX_REG_mask; 318 extern RegMask _PTR_NO_RAX_RBX_REG_mask; 319 extern RegMask _LONG_REG_mask; 320 extern RegMask _LONG_NO_RAX_RDX_REG_mask; 321 extern RegMask _LONG_NO_RCX_REG_mask; 322 extern RegMask _INT_REG_mask; 323 extern RegMask _INT_NO_RAX_RDX_REG_mask; 324 extern RegMask _INT_NO_RCX_REG_mask; 325 326 extern RegMask _STACK_OR_PTR_REG_mask; 327 extern RegMask _STACK_OR_LONG_REG_mask; 328 extern RegMask _STACK_OR_INT_REG_mask; 329 330 inline const RegMask& STACK_OR_PTR_REG_mask() { return _STACK_OR_PTR_REG_mask; } 331 inline const RegMask& STACK_OR_LONG_REG_mask() { return _STACK_OR_LONG_REG_mask; } 332 inline const RegMask& STACK_OR_INT_REG_mask() { return _STACK_OR_INT_REG_mask; } 333 334 %} 335 336 source %{ 337 #define RELOC_IMM64 Assembler::imm_operand 338 #define RELOC_DISP32 Assembler::disp32_operand 339 340 #define __ _masm. 341 342 RegMask _ANY_REG_mask; 343 RegMask _PTR_REG_mask; 344 RegMask _PTR_REG_NO_RBP_mask; 345 RegMask _PTR_NO_RAX_REG_mask; 346 RegMask _PTR_NO_RAX_RBX_REG_mask; 347 RegMask _LONG_REG_mask; 348 RegMask _LONG_NO_RAX_RDX_REG_mask; 349 RegMask _LONG_NO_RCX_REG_mask; 350 RegMask _INT_REG_mask; 351 RegMask _INT_NO_RAX_RDX_REG_mask; 352 RegMask _INT_NO_RCX_REG_mask; 353 RegMask _STACK_OR_PTR_REG_mask; 354 RegMask _STACK_OR_LONG_REG_mask; 355 RegMask _STACK_OR_INT_REG_mask; 356 357 static bool need_r12_heapbase() { 358 return UseCompressedOops; 359 } 360 361 void reg_mask_init() { 362 // _ALL_REG_mask is generated by adlc from the all_reg register class below. 363 // We derive a number of subsets from it. 
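  // The pointer and long masks below exclude a register by removing both of
  // its OptoRegs (the VMReg and its ->next(), i.e. the low and high halves);
  // the int masks only need to remove the single slot.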
  _ANY_REG_mask = _ALL_REG_mask;

  if (PreserveFramePointer) {
    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
  }
  if (need_r12_heapbase()) {
    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()->next()));
  }

  _PTR_REG_mask = _ANY_REG_mask;
  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()));
  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next()));
  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()));
  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()->next()));

  _STACK_OR_PTR_REG_mask = _PTR_REG_mask;
  _STACK_OR_PTR_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());

  _PTR_REG_NO_RBP_mask = _PTR_REG_mask;
  _PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
  _PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));

  _PTR_NO_RAX_REG_mask = _PTR_REG_mask;
  _PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
  _PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));

  _PTR_NO_RAX_RBX_REG_mask = _PTR_NO_RAX_REG_mask;
  _PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()));
  _PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next()));

  _LONG_REG_mask = _PTR_REG_mask;
  _STACK_OR_LONG_REG_mask = _LONG_REG_mask;
  _STACK_OR_LONG_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());

  _LONG_NO_RAX_RDX_REG_mask = _LONG_REG_mask;
  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next()));

  _LONG_NO_RCX_REG_mask = _LONG_REG_mask;
  _LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
  _LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next()));

  _INT_REG_mask = _ALL_INT_REG_mask;
  if (PreserveFramePointer) {
    _INT_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
  }
  if (need_r12_heapbase()) {
    _INT_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
  }

  _STACK_OR_INT_REG_mask = _INT_REG_mask;
  _STACK_OR_INT_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());

  _INT_NO_RAX_RDX_REG_mask = _INT_REG_mask;
  _INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
  _INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));

  _INT_NO_RCX_REG_mask = _INT_REG_mask;
  _INT_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));

  if (Matcher::has_predicated_vectors()) {
    // Post-loop multi-versioning expects the mask to be present in the K1
    // register.  Until that is fixed, the register allocator should not
    // allocate K1; this prevents accidental corruption of the value held
    // in K1.
    if (PostLoopMultiversioning) {
      const_cast<RegMask*>(&_VECTMASK_REG_mask)->Remove(OptoReg::as_OptoReg(k1->as_VMReg()));
      const_cast<RegMask*>(&_VECTMASK_REG_mask)->Remove(OptoReg::as_OptoReg(k1->as_VMReg()->next()));
    }
  }
}

static bool generate_vzeroupper(Compile* C) {
  return (VM_Version::supports_vzeroupper() && (C->max_vector_size() > 16 || C->clear_upper_avx() == true)) ?
true: false; // Generate vzeroupper 441 } 442 443 static int clear_avx_size() { 444 return generate_vzeroupper(Compile::current()) ? 3: 0; // vzeroupper 445 } 446 447 // !!!!! Special hack to get all types of calls to specify the byte offset 448 // from the start of the call to the point where the return address 449 // will point. 450 int MachCallStaticJavaNode::ret_addr_offset() 451 { 452 int offset = 5; // 5 bytes from start of call to where return address points 453 offset += clear_avx_size(); 454 return offset; 455 } 456 457 int MachCallDynamicJavaNode::ret_addr_offset() 458 { 459 int offset = 15; // 15 bytes from start of call to where return address points 460 offset += clear_avx_size(); 461 return offset; 462 } 463 464 int MachCallRuntimeNode::ret_addr_offset() { 465 int offset = 13; // movq r10,#addr; callq (r10) 466 if (this->ideal_Opcode() != Op_CallLeafVector) { 467 offset += clear_avx_size(); 468 } 469 return offset; 470 } 471 472 int MachCallNativeNode::ret_addr_offset() { 473 int offset = 13; // movq r10,#addr; callq (r10) 474 offset += clear_avx_size(); 475 return offset; 476 } 477 // 478 // Compute padding required for nodes which need alignment 479 // 480 481 // The address of the call instruction needs to be 4-byte aligned to 482 // ensure that it does not span a cache line so that it can be patched. 483 int CallStaticJavaDirectNode::compute_padding(int current_offset) const 484 { 485 current_offset += clear_avx_size(); // skip vzeroupper 486 current_offset += 1; // skip call opcode byte 487 return align_up(current_offset, alignment_required()) - current_offset; 488 } 489 490 // The address of the call instruction needs to be 4-byte aligned to 491 // ensure that it does not span a cache line so that it can be patched. 492 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const 493 { 494 current_offset += clear_avx_size(); // skip vzeroupper 495 current_offset += 11; // skip movq instruction + call opcode byte 496 return align_up(current_offset, alignment_required()) - current_offset; 497 } 498 499 // EMIT_RM() 500 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) { 501 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3); 502 cbuf.insts()->emit_int8(c); 503 } 504 505 // EMIT_CC() 506 void emit_cc(CodeBuffer &cbuf, int f1, int f2) { 507 unsigned char c = (unsigned char) (f1 | f2); 508 cbuf.insts()->emit_int8(c); 509 } 510 511 // EMIT_OPCODE() 512 void emit_opcode(CodeBuffer &cbuf, int code) { 513 cbuf.insts()->emit_int8((unsigned char) code); 514 } 515 516 // EMIT_OPCODE() w/ relocation information 517 void emit_opcode(CodeBuffer &cbuf, 518 int code, relocInfo::relocType reloc, int offset, int format) 519 { 520 cbuf.relocate(cbuf.insts_mark() + offset, reloc, format); 521 emit_opcode(cbuf, code); 522 } 523 524 // EMIT_D8() 525 void emit_d8(CodeBuffer &cbuf, int d8) { 526 cbuf.insts()->emit_int8((unsigned char) d8); 527 } 528 529 // EMIT_D16() 530 void emit_d16(CodeBuffer &cbuf, int d16) { 531 cbuf.insts()->emit_int16(d16); 532 } 533 534 // EMIT_D32() 535 void emit_d32(CodeBuffer &cbuf, int d32) { 536 cbuf.insts()->emit_int32(d32); 537 } 538 539 // EMIT_D64() 540 void emit_d64(CodeBuffer &cbuf, int64_t d64) { 541 cbuf.insts()->emit_int64(d64); 542 } 543 544 // emit 32 bit value and construct relocation entry from relocInfo::relocType 545 void emit_d32_reloc(CodeBuffer& cbuf, 546 int d32, 547 relocInfo::relocType reloc, 548 int format) 549 { 550 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc"); 551 
cbuf.relocate(cbuf.insts_mark(), reloc, format); 552 cbuf.insts()->emit_int32(d32); 553 } 554 555 // emit 32 bit value and construct relocation entry from RelocationHolder 556 void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) { 557 #ifdef ASSERT 558 if (rspec.reloc()->type() == relocInfo::oop_type && 559 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) { 560 assert(Universe::heap()->is_in((address)(intptr_t)d32), "should be real oop"); 561 assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)), "cannot embed broken oops in code"); 562 } 563 #endif 564 cbuf.relocate(cbuf.insts_mark(), rspec, format); 565 cbuf.insts()->emit_int32(d32); 566 } 567 568 void emit_d32_reloc(CodeBuffer& cbuf, address addr) { 569 address next_ip = cbuf.insts_end() + 4; 570 emit_d32_reloc(cbuf, (int) (addr - next_ip), 571 external_word_Relocation::spec(addr), 572 RELOC_DISP32); 573 } 574 575 576 // emit 64 bit value and construct relocation entry from relocInfo::relocType 577 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) { 578 cbuf.relocate(cbuf.insts_mark(), reloc, format); 579 cbuf.insts()->emit_int64(d64); 580 } 581 582 // emit 64 bit value and construct relocation entry from RelocationHolder 583 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) { 584 #ifdef ASSERT 585 if (rspec.reloc()->type() == relocInfo::oop_type && 586 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) { 587 assert(Universe::heap()->is_in((address)d64), "should be real oop"); 588 assert(oopDesc::is_oop(cast_to_oop(d64)), "cannot embed broken oops in code"); 589 } 590 #endif 591 cbuf.relocate(cbuf.insts_mark(), rspec, format); 592 cbuf.insts()->emit_int64(d64); 593 } 594 595 // Access stack slot for load or store 596 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp) 597 { 598 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src]) 599 if (-0x80 <= disp && disp < 0x80) { 600 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte 601 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte 602 emit_d8(cbuf, disp); // Displacement // R/M byte 603 } else { 604 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte 605 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte 606 emit_d32(cbuf, disp); // Displacement // R/M byte 607 } 608 } 609 610 // rRegI ereg, memory mem) %{ // emit_reg_mem 611 void encode_RegMem(CodeBuffer &cbuf, 612 int reg, 613 int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) 614 { 615 assert(disp_reloc == relocInfo::none, "cannot have disp"); 616 int regenc = reg & 7; 617 int baseenc = base & 7; 618 int indexenc = index & 7; 619 620 // There is no index & no scale, use form without SIB byte 621 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) { 622 // If no displacement, mode is 0x0; unless base is [RBP] or [R13] 623 if (disp == 0 && base != RBP_enc && base != R13_enc) { 624 emit_rm(cbuf, 0x0, regenc, baseenc); // * 625 } else if (-0x80 <= disp && disp < 0x80 && disp_reloc == relocInfo::none) { 626 // If 8-bit displacement, mode 0x1 627 emit_rm(cbuf, 0x1, regenc, baseenc); // * 628 emit_d8(cbuf, disp); 629 } else { 630 // If 32-bit displacement 631 if (base == -1) { // Special flag for absolute address 632 emit_rm(cbuf, 0x0, regenc, 0x5); // * 633 if (disp_reloc != relocInfo::none) { 634 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32); 635 } else { 636 emit_d32(cbuf, disp); 637 } 638 } else { 639 // Normal base + offset 640 
emit_rm(cbuf, 0x2, regenc, baseenc); // * 641 if (disp_reloc != relocInfo::none) { 642 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32); 643 } else { 644 emit_d32(cbuf, disp); 645 } 646 } 647 } 648 } else { 649 // Else, encode with the SIB byte 650 // If no displacement, mode is 0x0; unless base is [RBP] or [R13] 651 if (disp == 0 && base != RBP_enc && base != R13_enc) { 652 // If no displacement 653 emit_rm(cbuf, 0x0, regenc, 0x4); // * 654 emit_rm(cbuf, scale, indexenc, baseenc); 655 } else { 656 if (-0x80 <= disp && disp < 0x80 && disp_reloc == relocInfo::none) { 657 // If 8-bit displacement, mode 0x1 658 emit_rm(cbuf, 0x1, regenc, 0x4); // * 659 emit_rm(cbuf, scale, indexenc, baseenc); 660 emit_d8(cbuf, disp); 661 } else { 662 // If 32-bit displacement 663 if (base == 0x04 ) { 664 emit_rm(cbuf, 0x2, regenc, 0x4); 665 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid??? 666 } else { 667 emit_rm(cbuf, 0x2, regenc, 0x4); 668 emit_rm(cbuf, scale, indexenc, baseenc); // * 669 } 670 if (disp_reloc != relocInfo::none) { 671 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32); 672 } else { 673 emit_d32(cbuf, disp); 674 } 675 } 676 } 677 } 678 } 679 680 // This could be in MacroAssembler but it's fairly C2 specific 681 void emit_cmpfp_fixup(MacroAssembler& _masm) { 682 Label exit; 683 __ jccb(Assembler::noParity, exit); 684 __ pushf(); 685 // 686 // comiss/ucomiss instructions set ZF,PF,CF flags and 687 // zero OF,AF,SF for NaN values. 688 // Fixup flags by zeroing ZF,PF so that compare of NaN 689 // values returns 'less than' result (CF is set). 690 // Leave the rest of flags unchanged. 691 // 692 // 7 6 5 4 3 2 1 0 693 // |S|Z|r|A|r|P|r|C| (r - reserved bit) 694 // 0 0 1 0 1 0 1 1 (0x2B) 695 // 696 __ andq(Address(rsp, 0), 0xffffff2b); 697 __ popf(); 698 __ bind(exit); 699 } 700 701 void emit_cmpfp3(MacroAssembler& _masm, Register dst) { 702 Label done; 703 __ movl(dst, -1); 704 __ jcc(Assembler::parity, done); 705 __ jcc(Assembler::below, done); 706 __ setb(Assembler::notEqual, dst); 707 __ movzbl(dst, dst); 708 __ bind(done); 709 } 710 711 // Math.min() # Math.max() 712 // -------------------------- 713 // ucomis[s/d] # 714 // ja -> b # a 715 // jp -> NaN # NaN 716 // jb -> a # b 717 // je # 718 // |-jz -> a | b # a & b 719 // | -> a # 720 void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst, 721 XMMRegister a, XMMRegister b, 722 XMMRegister xmmt, Register rt, 723 bool min, bool single) { 724 725 Label nan, zero, below, above, done; 726 727 if (single) 728 __ ucomiss(a, b); 729 else 730 __ ucomisd(a, b); 731 732 if (dst->encoding() != (min ? b : a)->encoding()) 733 __ jccb(Assembler::above, above); // CF=0 & ZF=0 734 else 735 __ jccb(Assembler::above, done); 736 737 __ jccb(Assembler::parity, nan); // PF=1 738 __ jccb(Assembler::below, below); // CF=1 739 740 // equal 741 __ vpxor(xmmt, xmmt, xmmt, Assembler::AVX_128bit); 742 if (single) { 743 __ ucomiss(a, xmmt); 744 __ jccb(Assembler::equal, zero); 745 746 __ movflt(dst, a); 747 __ jmp(done); 748 } 749 else { 750 __ ucomisd(a, xmmt); 751 __ jccb(Assembler::equal, zero); 752 753 __ movdbl(dst, a); 754 __ jmp(done); 755 } 756 757 __ bind(zero); 758 if (min) 759 __ vpor(dst, a, b, Assembler::AVX_128bit); 760 else 761 __ vpand(dst, a, b, Assembler::AVX_128bit); 762 763 __ jmp(done); 764 765 __ bind(above); 766 if (single) 767 __ movflt(dst, min ? b : a); 768 else 769 __ movdbl(dst, min ? 
b : a); 770 771 __ jmp(done); 772 773 __ bind(nan); 774 if (single) { 775 __ movl(rt, 0x7fc00000); // Float.NaN 776 __ movdl(dst, rt); 777 } 778 else { 779 __ mov64(rt, 0x7ff8000000000000L); // Double.NaN 780 __ movdq(dst, rt); 781 } 782 __ jmp(done); 783 784 __ bind(below); 785 if (single) 786 __ movflt(dst, min ? a : b); 787 else 788 __ movdbl(dst, min ? a : b); 789 790 __ bind(done); 791 } 792 793 //============================================================================= 794 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; 795 796 int ConstantTable::calculate_table_base_offset() const { 797 return 0; // absolute addressing, no offset 798 } 799 800 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; } 801 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) { 802 ShouldNotReachHere(); 803 } 804 805 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 806 // Empty encoding 807 } 808 809 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { 810 return 0; 811 } 812 813 #ifndef PRODUCT 814 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 815 st->print("# MachConstantBaseNode (empty encoding)"); 816 } 817 #endif 818 819 820 //============================================================================= 821 #ifndef PRODUCT 822 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 823 Compile* C = ra_->C; 824 825 int framesize = C->output()->frame_size_in_bytes(); 826 int bangsize = C->output()->bang_size_in_bytes(); 827 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); 828 // Remove wordSize for return addr which is already pushed. 829 framesize -= wordSize; 830 831 if (C->output()->need_stack_bang(bangsize)) { 832 framesize -= wordSize; 833 st->print("# stack bang (%d bytes)", bangsize); 834 st->print("\n\t"); 835 st->print("pushq rbp\t# Save rbp"); 836 if (PreserveFramePointer) { 837 st->print("\n\t"); 838 st->print("movq rbp, rsp\t# Save the caller's SP into rbp"); 839 } 840 if (framesize) { 841 st->print("\n\t"); 842 st->print("subq rsp, #%d\t# Create frame",framesize); 843 } 844 } else { 845 st->print("subq rsp, #%d\t# Create frame",framesize); 846 st->print("\n\t"); 847 framesize -= wordSize; 848 st->print("movq [rsp + #%d], rbp\t# Save rbp",framesize); 849 if (PreserveFramePointer) { 850 st->print("\n\t"); 851 st->print("movq rbp, rsp\t# Save the caller's SP into rbp"); 852 if (framesize > 0) { 853 st->print("\n\t"); 854 st->print("addq rbp, #%d", framesize); 855 } 856 } 857 } 858 859 if (VerifyStackAtCalls) { 860 st->print("\n\t"); 861 framesize -= wordSize; 862 st->print("movq [rsp + #%d], 0xbadb100d\t# Majik cookie for stack depth check",framesize); 863 #ifdef ASSERT 864 st->print("\n\t"); 865 st->print("# stack alignment check"); 866 #endif 867 } 868 if (C->stub_function() != NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) { 869 st->print("\n\t"); 870 st->print("cmpl [r15_thread + #disarmed_offset], #disarmed_value\t"); 871 st->print("\n\t"); 872 st->print("je fast_entry\t"); 873 st->print("\n\t"); 874 st->print("call #nmethod_entry_barrier_stub\t"); 875 st->print("\n\tfast_entry:"); 876 } 877 st->cr(); 878 } 879 #endif 880 881 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 882 Compile* C = ra_->C; 883 MacroAssembler _masm(&cbuf); 884 885 int framesize = C->output()->frame_size_in_bytes(); 886 int bangsize = 
C->output()->bang_size_in_bytes(); 887 888 if (C->clinit_barrier_on_entry()) { 889 assert(VM_Version::supports_fast_class_init_checks(), "sanity"); 890 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started"); 891 892 Label L_skip_barrier; 893 Register klass = rscratch1; 894 895 __ mov_metadata(klass, C->method()->holder()->constant_encoding()); 896 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/); 897 898 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path 899 900 __ bind(L_skip_barrier); 901 } 902 903 __ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, false, C->stub_function() != NULL); 904 905 C->output()->set_frame_complete(cbuf.insts_size()); 906 907 if (C->has_mach_constant_base_node()) { 908 // NOTE: We set the table base offset here because users might be 909 // emitted before MachConstantBaseNode. 910 ConstantTable& constant_table = C->output()->constant_table(); 911 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset()); 912 } 913 } 914 915 uint MachPrologNode::size(PhaseRegAlloc* ra_) const 916 { 917 return MachNode::size(ra_); // too many variables; just compute it 918 // the hard way 919 } 920 921 int MachPrologNode::reloc() const 922 { 923 return 0; // a large enough number 924 } 925 926 //============================================================================= 927 #ifndef PRODUCT 928 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const 929 { 930 Compile* C = ra_->C; 931 if (generate_vzeroupper(C)) { 932 st->print("vzeroupper"); 933 st->cr(); st->print("\t"); 934 } 935 936 int framesize = C->output()->frame_size_in_bytes(); 937 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); 938 // Remove word for return adr already pushed 939 // and RBP 940 framesize -= 2*wordSize; 941 942 if (framesize) { 943 st->print_cr("addq rsp, %d\t# Destroy frame", framesize); 944 st->print("\t"); 945 } 946 947 st->print_cr("popq rbp"); 948 if (do_polling() && C->is_method_compilation()) { 949 st->print("\t"); 950 st->print_cr("cmpq rsp, poll_offset[r15_thread] \n\t" 951 "ja #safepoint_stub\t" 952 "# Safepoint: poll for GC"); 953 } 954 } 955 #endif 956 957 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const 958 { 959 Compile* C = ra_->C; 960 MacroAssembler _masm(&cbuf); 961 962 if (generate_vzeroupper(C)) { 963 // Clear upper bits of YMM registers when current compiled code uses 964 // wide vectors to avoid AVX <-> SSE transition penalty during call. 
965 __ vzeroupper(); 966 } 967 968 int framesize = C->output()->frame_size_in_bytes(); 969 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); 970 // Remove word for return adr already pushed 971 // and RBP 972 framesize -= 2*wordSize; 973 974 // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here 975 976 if (framesize) { 977 emit_opcode(cbuf, Assembler::REX_W); 978 if (framesize < 0x80) { 979 emit_opcode(cbuf, 0x83); // addq rsp, #framesize 980 emit_rm(cbuf, 0x3, 0x00, RSP_enc); 981 emit_d8(cbuf, framesize); 982 } else { 983 emit_opcode(cbuf, 0x81); // addq rsp, #framesize 984 emit_rm(cbuf, 0x3, 0x00, RSP_enc); 985 emit_d32(cbuf, framesize); 986 } 987 } 988 989 // popq rbp 990 emit_opcode(cbuf, 0x58 | RBP_enc); 991 992 if (StackReservedPages > 0 && C->has_reserved_stack_access()) { 993 __ reserved_stack_check(); 994 } 995 996 if (do_polling() && C->is_method_compilation()) { 997 MacroAssembler _masm(&cbuf); 998 Label dummy_label; 999 Label* code_stub = &dummy_label; 1000 if (!C->output()->in_scratch_emit_size()) { 1001 C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset()); 1002 C->output()->add_stub(stub); 1003 code_stub = &stub->entry(); 1004 } 1005 __ relocate(relocInfo::poll_return_type); 1006 __ safepoint_poll(*code_stub, r15_thread, true /* at_return */, true /* in_nmethod */); 1007 } 1008 } 1009 1010 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const 1011 { 1012 return MachNode::size(ra_); // too many variables; just compute it 1013 // the hard way 1014 } 1015 1016 int MachEpilogNode::reloc() const 1017 { 1018 return 2; // a large enough number 1019 } 1020 1021 const Pipeline* MachEpilogNode::pipeline() const 1022 { 1023 return MachNode::pipeline_class(); 1024 } 1025 1026 //============================================================================= 1027 1028 enum RC { 1029 rc_bad, 1030 rc_int, 1031 rc_kreg, 1032 rc_float, 1033 rc_stack 1034 }; 1035 1036 static enum RC rc_class(OptoReg::Name reg) 1037 { 1038 if( !OptoReg::is_valid(reg) ) return rc_bad; 1039 1040 if (OptoReg::is_stack(reg)) return rc_stack; 1041 1042 VMReg r = OptoReg::as_VMReg(reg); 1043 1044 if (r->is_Register()) return rc_int; 1045 1046 if (r->is_KRegister()) return rc_kreg; 1047 1048 assert(r->is_XMMRegister(), "must be"); 1049 return rc_float; 1050 } 1051 1052 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad. 
1053 static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo, 1054 int src_hi, int dst_hi, uint ireg, outputStream* st); 1055 1056 void vec_spill_helper(CodeBuffer *cbuf, bool is_load, 1057 int stack_offset, int reg, uint ireg, outputStream* st); 1058 1059 static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset, 1060 int dst_offset, uint ireg, outputStream* st) { 1061 if (cbuf) { 1062 MacroAssembler _masm(cbuf); 1063 switch (ireg) { 1064 case Op_VecS: 1065 __ movq(Address(rsp, -8), rax); 1066 __ movl(rax, Address(rsp, src_offset)); 1067 __ movl(Address(rsp, dst_offset), rax); 1068 __ movq(rax, Address(rsp, -8)); 1069 break; 1070 case Op_VecD: 1071 __ pushq(Address(rsp, src_offset)); 1072 __ popq (Address(rsp, dst_offset)); 1073 break; 1074 case Op_VecX: 1075 __ pushq(Address(rsp, src_offset)); 1076 __ popq (Address(rsp, dst_offset)); 1077 __ pushq(Address(rsp, src_offset+8)); 1078 __ popq (Address(rsp, dst_offset+8)); 1079 break; 1080 case Op_VecY: 1081 __ vmovdqu(Address(rsp, -32), xmm0); 1082 __ vmovdqu(xmm0, Address(rsp, src_offset)); 1083 __ vmovdqu(Address(rsp, dst_offset), xmm0); 1084 __ vmovdqu(xmm0, Address(rsp, -32)); 1085 break; 1086 case Op_VecZ: 1087 __ evmovdquq(Address(rsp, -64), xmm0, 2); 1088 __ evmovdquq(xmm0, Address(rsp, src_offset), 2); 1089 __ evmovdquq(Address(rsp, dst_offset), xmm0, 2); 1090 __ evmovdquq(xmm0, Address(rsp, -64), 2); 1091 break; 1092 default: 1093 ShouldNotReachHere(); 1094 } 1095 #ifndef PRODUCT 1096 } else { 1097 switch (ireg) { 1098 case Op_VecS: 1099 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t" 1100 "movl rax, [rsp + #%d]\n\t" 1101 "movl [rsp + #%d], rax\n\t" 1102 "movq rax, [rsp - #8]", 1103 src_offset, dst_offset); 1104 break; 1105 case Op_VecD: 1106 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t" 1107 "popq [rsp + #%d]", 1108 src_offset, dst_offset); 1109 break; 1110 case Op_VecX: 1111 st->print("pushq [rsp + #%d]\t# 128-bit mem-mem spill\n\t" 1112 "popq [rsp + #%d]\n\t" 1113 "pushq [rsp + #%d]\n\t" 1114 "popq [rsp + #%d]", 1115 src_offset, dst_offset, src_offset+8, dst_offset+8); 1116 break; 1117 case Op_VecY: 1118 st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t" 1119 "vmovdqu xmm0, [rsp + #%d]\n\t" 1120 "vmovdqu [rsp + #%d], xmm0\n\t" 1121 "vmovdqu xmm0, [rsp - #32]", 1122 src_offset, dst_offset); 1123 break; 1124 case Op_VecZ: 1125 st->print("vmovdqu [rsp - #64], xmm0\t# 512-bit mem-mem spill\n\t" 1126 "vmovdqu xmm0, [rsp + #%d]\n\t" 1127 "vmovdqu [rsp + #%d], xmm0\n\t" 1128 "vmovdqu xmm0, [rsp - #64]", 1129 src_offset, dst_offset); 1130 break; 1131 default: 1132 ShouldNotReachHere(); 1133 } 1134 #endif 1135 } 1136 } 1137 1138 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf, 1139 PhaseRegAlloc* ra_, 1140 bool do_size, 1141 outputStream* st) const { 1142 assert(cbuf != NULL || st != NULL, "sanity"); 1143 // Get registers to move 1144 OptoReg::Name src_second = ra_->get_reg_second(in(1)); 1145 OptoReg::Name src_first = ra_->get_reg_first(in(1)); 1146 OptoReg::Name dst_second = ra_->get_reg_second(this); 1147 OptoReg::Name dst_first = ra_->get_reg_first(this); 1148 1149 enum RC src_second_rc = rc_class(src_second); 1150 enum RC src_first_rc = rc_class(src_first); 1151 enum RC dst_second_rc = rc_class(dst_second); 1152 enum RC dst_first_rc = rc_class(dst_first); 1153 1154 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), 1155 "must move at least 1 register" ); 1156 1157 if (src_first == dst_first && src_second == dst_second) { 1158 // Self copy, no 
move 1159 return 0; 1160 } 1161 if (bottom_type()->isa_vect() != NULL && bottom_type()->isa_vectmask() == NULL) { 1162 uint ireg = ideal_reg(); 1163 assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity"); 1164 assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY || ireg == Op_VecZ ), "sanity"); 1165 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { 1166 // mem -> mem 1167 int src_offset = ra_->reg2offset(src_first); 1168 int dst_offset = ra_->reg2offset(dst_first); 1169 vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st); 1170 } else if (src_first_rc == rc_float && dst_first_rc == rc_float ) { 1171 vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st); 1172 } else if (src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1173 int stack_offset = ra_->reg2offset(dst_first); 1174 vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st); 1175 } else if (src_first_rc == rc_stack && dst_first_rc == rc_float ) { 1176 int stack_offset = ra_->reg2offset(src_first); 1177 vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st); 1178 } else { 1179 ShouldNotReachHere(); 1180 } 1181 return 0; 1182 } 1183 if (src_first_rc == rc_stack) { 1184 // mem -> 1185 if (dst_first_rc == rc_stack) { 1186 // mem -> mem 1187 assert(src_second != dst_first, "overlap"); 1188 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1189 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1190 // 64-bit 1191 int src_offset = ra_->reg2offset(src_first); 1192 int dst_offset = ra_->reg2offset(dst_first); 1193 if (cbuf) { 1194 MacroAssembler _masm(cbuf); 1195 __ pushq(Address(rsp, src_offset)); 1196 __ popq (Address(rsp, dst_offset)); 1197 #ifndef PRODUCT 1198 } else { 1199 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t" 1200 "popq [rsp + #%d]", 1201 src_offset, dst_offset); 1202 #endif 1203 } 1204 } else { 1205 // 32-bit 1206 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1207 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1208 // No pushl/popl, so: 1209 int src_offset = ra_->reg2offset(src_first); 1210 int dst_offset = ra_->reg2offset(dst_first); 1211 if (cbuf) { 1212 MacroAssembler _masm(cbuf); 1213 __ movq(Address(rsp, -8), rax); 1214 __ movl(rax, Address(rsp, src_offset)); 1215 __ movl(Address(rsp, dst_offset), rax); 1216 __ movq(rax, Address(rsp, -8)); 1217 #ifndef PRODUCT 1218 } else { 1219 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t" 1220 "movl rax, [rsp + #%d]\n\t" 1221 "movl [rsp + #%d], rax\n\t" 1222 "movq rax, [rsp - #8]", 1223 src_offset, dst_offset); 1224 #endif 1225 } 1226 } 1227 return 0; 1228 } else if (dst_first_rc == rc_int) { 1229 // mem -> gpr 1230 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1231 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1232 // 64-bit 1233 int offset = ra_->reg2offset(src_first); 1234 if (cbuf) { 1235 MacroAssembler _masm(cbuf); 1236 __ movq(as_Register(Matcher::_regEncode[dst_first]), Address(rsp, offset)); 1237 #ifndef PRODUCT 1238 } else { 1239 st->print("movq %s, [rsp + #%d]\t# spill", 1240 Matcher::regName[dst_first], 1241 offset); 1242 #endif 1243 } 1244 } else { 1245 // 32-bit 1246 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1247 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1248 int offset = ra_->reg2offset(src_first); 1249 if (cbuf) { 1250 MacroAssembler _masm(cbuf); 1251 __ 
movl(as_Register(Matcher::_regEncode[dst_first]), Address(rsp, offset)); 1252 #ifndef PRODUCT 1253 } else { 1254 st->print("movl %s, [rsp + #%d]\t# spill", 1255 Matcher::regName[dst_first], 1256 offset); 1257 #endif 1258 } 1259 } 1260 return 0; 1261 } else if (dst_first_rc == rc_float) { 1262 // mem-> xmm 1263 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1264 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1265 // 64-bit 1266 int offset = ra_->reg2offset(src_first); 1267 if (cbuf) { 1268 MacroAssembler _masm(cbuf); 1269 __ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset)); 1270 #ifndef PRODUCT 1271 } else { 1272 st->print("%s %s, [rsp + #%d]\t# spill", 1273 UseXmmLoadAndClearUpper ? "movsd " : "movlpd", 1274 Matcher::regName[dst_first], 1275 offset); 1276 #endif 1277 } 1278 } else { 1279 // 32-bit 1280 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1281 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1282 int offset = ra_->reg2offset(src_first); 1283 if (cbuf) { 1284 MacroAssembler _masm(cbuf); 1285 __ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset)); 1286 #ifndef PRODUCT 1287 } else { 1288 st->print("movss %s, [rsp + #%d]\t# spill", 1289 Matcher::regName[dst_first], 1290 offset); 1291 #endif 1292 } 1293 } 1294 return 0; 1295 } else if (dst_first_rc == rc_kreg) { 1296 // mem -> kreg 1297 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1298 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1299 // 64-bit 1300 int offset = ra_->reg2offset(src_first); 1301 if (cbuf) { 1302 MacroAssembler _masm(cbuf); 1303 __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset)); 1304 #ifndef PRODUCT 1305 } else { 1306 st->print("kmovq %s, [rsp + #%d]\t# spill", 1307 Matcher::regName[dst_first], 1308 offset); 1309 #endif 1310 } 1311 } 1312 return 0; 1313 } 1314 } else if (src_first_rc == rc_int) { 1315 // gpr -> 1316 if (dst_first_rc == rc_stack) { 1317 // gpr -> mem 1318 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1319 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1320 // 64-bit 1321 int offset = ra_->reg2offset(dst_first); 1322 if (cbuf) { 1323 MacroAssembler _masm(cbuf); 1324 __ movq(Address(rsp, offset), as_Register(Matcher::_regEncode[src_first])); 1325 #ifndef PRODUCT 1326 } else { 1327 st->print("movq [rsp + #%d], %s\t# spill", 1328 offset, 1329 Matcher::regName[src_first]); 1330 #endif 1331 } 1332 } else { 1333 // 32-bit 1334 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1335 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1336 int offset = ra_->reg2offset(dst_first); 1337 if (cbuf) { 1338 MacroAssembler _masm(cbuf); 1339 __ movl(Address(rsp, offset), as_Register(Matcher::_regEncode[src_first])); 1340 #ifndef PRODUCT 1341 } else { 1342 st->print("movl [rsp + #%d], %s\t# spill", 1343 offset, 1344 Matcher::regName[src_first]); 1345 #endif 1346 } 1347 } 1348 return 0; 1349 } else if (dst_first_rc == rc_int) { 1350 // gpr -> gpr 1351 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1352 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1353 // 64-bit 1354 if (cbuf) { 1355 MacroAssembler _masm(cbuf); 1356 __ movq(as_Register(Matcher::_regEncode[dst_first]), 1357 as_Register(Matcher::_regEncode[src_first])); 1358 #ifndef PRODUCT 1359 } else { 1360 st->print("movq %s, %s\t# spill", 1361 Matcher::regName[dst_first], 1362 
Matcher::regName[src_first]); 1363 #endif 1364 } 1365 return 0; 1366 } else { 1367 // 32-bit 1368 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1369 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1370 if (cbuf) { 1371 MacroAssembler _masm(cbuf); 1372 __ movl(as_Register(Matcher::_regEncode[dst_first]), 1373 as_Register(Matcher::_regEncode[src_first])); 1374 #ifndef PRODUCT 1375 } else { 1376 st->print("movl %s, %s\t# spill", 1377 Matcher::regName[dst_first], 1378 Matcher::regName[src_first]); 1379 #endif 1380 } 1381 return 0; 1382 } 1383 } else if (dst_first_rc == rc_float) { 1384 // gpr -> xmm 1385 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1386 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1387 // 64-bit 1388 if (cbuf) { 1389 MacroAssembler _masm(cbuf); 1390 __ movdq( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first])); 1391 #ifndef PRODUCT 1392 } else { 1393 st->print("movdq %s, %s\t# spill", 1394 Matcher::regName[dst_first], 1395 Matcher::regName[src_first]); 1396 #endif 1397 } 1398 } else { 1399 // 32-bit 1400 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1401 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1402 if (cbuf) { 1403 MacroAssembler _masm(cbuf); 1404 __ movdl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first])); 1405 #ifndef PRODUCT 1406 } else { 1407 st->print("movdl %s, %s\t# spill", 1408 Matcher::regName[dst_first], 1409 Matcher::regName[src_first]); 1410 #endif 1411 } 1412 } 1413 return 0; 1414 } else if (dst_first_rc == rc_kreg) { 1415 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1416 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1417 // 64-bit 1418 if (cbuf) { 1419 MacroAssembler _masm(cbuf); 1420 __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first])); 1421 #ifndef PRODUCT 1422 } else { 1423 st->print("kmovq %s, %s\t# spill", 1424 Matcher::regName[dst_first], 1425 Matcher::regName[src_first]); 1426 #endif 1427 } 1428 } 1429 Unimplemented(); 1430 return 0; 1431 } 1432 } else if (src_first_rc == rc_float) { 1433 // xmm -> 1434 if (dst_first_rc == rc_stack) { 1435 // xmm -> mem 1436 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1437 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1438 // 64-bit 1439 int offset = ra_->reg2offset(dst_first); 1440 if (cbuf) { 1441 MacroAssembler _masm(cbuf); 1442 __ movdbl( Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first])); 1443 #ifndef PRODUCT 1444 } else { 1445 st->print("movsd [rsp + #%d], %s\t# spill", 1446 offset, 1447 Matcher::regName[src_first]); 1448 #endif 1449 } 1450 } else { 1451 // 32-bit 1452 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1453 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1454 int offset = ra_->reg2offset(dst_first); 1455 if (cbuf) { 1456 MacroAssembler _masm(cbuf); 1457 __ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first])); 1458 #ifndef PRODUCT 1459 } else { 1460 st->print("movss [rsp + #%d], %s\t# spill", 1461 offset, 1462 Matcher::regName[src_first]); 1463 #endif 1464 } 1465 } 1466 return 0; 1467 } else if (dst_first_rc == rc_int) { 1468 // xmm -> gpr 1469 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1470 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 
1471 // 64-bit 1472 if (cbuf) { 1473 MacroAssembler _masm(cbuf); 1474 __ movdq( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first])); 1475 #ifndef PRODUCT 1476 } else { 1477 st->print("movdq %s, %s\t# spill", 1478 Matcher::regName[dst_first], 1479 Matcher::regName[src_first]); 1480 #endif 1481 } 1482 } else { 1483 // 32-bit 1484 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1485 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1486 if (cbuf) { 1487 MacroAssembler _masm(cbuf); 1488 __ movdl( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first])); 1489 #ifndef PRODUCT 1490 } else { 1491 st->print("movdl %s, %s\t# spill", 1492 Matcher::regName[dst_first], 1493 Matcher::regName[src_first]); 1494 #endif 1495 } 1496 } 1497 return 0; 1498 } else if (dst_first_rc == rc_float) { 1499 // xmm -> xmm 1500 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1501 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1502 // 64-bit 1503 if (cbuf) { 1504 MacroAssembler _masm(cbuf); 1505 __ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first])); 1506 #ifndef PRODUCT 1507 } else { 1508 st->print("%s %s, %s\t# spill", 1509 UseXmmRegToRegMoveAll ? "movapd" : "movsd ", 1510 Matcher::regName[dst_first], 1511 Matcher::regName[src_first]); 1512 #endif 1513 } 1514 } else { 1515 // 32-bit 1516 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); 1517 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); 1518 if (cbuf) { 1519 MacroAssembler _masm(cbuf); 1520 __ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first])); 1521 #ifndef PRODUCT 1522 } else { 1523 st->print("%s %s, %s\t# spill", 1524 UseXmmRegToRegMoveAll ? 
"movaps" : "movss ", 1525 Matcher::regName[dst_first], 1526 Matcher::regName[src_first]); 1527 #endif 1528 } 1529 } 1530 return 0; 1531 } else if (dst_first_rc == rc_kreg) { 1532 assert(false, "Illegal spilling"); 1533 return 0; 1534 } 1535 } else if (src_first_rc == rc_kreg) { 1536 if (dst_first_rc == rc_stack) { 1537 // mem -> kreg 1538 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1539 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1540 // 64-bit 1541 int offset = ra_->reg2offset(dst_first); 1542 if (cbuf) { 1543 MacroAssembler _masm(cbuf); 1544 __ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first])); 1545 #ifndef PRODUCT 1546 } else { 1547 st->print("kmovq [rsp + #%d] , %s\t# spill", 1548 offset, 1549 Matcher::regName[src_first]); 1550 #endif 1551 } 1552 } 1553 return 0; 1554 } else if (dst_first_rc == rc_int) { 1555 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1556 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1557 // 64-bit 1558 if (cbuf) { 1559 MacroAssembler _masm(cbuf); 1560 __ kmov(as_Register(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first])); 1561 #ifndef PRODUCT 1562 } else { 1563 st->print("kmovq %s, %s\t# spill", 1564 Matcher::regName[dst_first], 1565 Matcher::regName[src_first]); 1566 #endif 1567 } 1568 } 1569 Unimplemented(); 1570 return 0; 1571 } else if (dst_first_rc == rc_kreg) { 1572 if ((src_first & 1) == 0 && src_first + 1 == src_second && 1573 (dst_first & 1) == 0 && dst_first + 1 == dst_second) { 1574 // 64-bit 1575 if (cbuf) { 1576 MacroAssembler _masm(cbuf); 1577 __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first])); 1578 #ifndef PRODUCT 1579 } else { 1580 st->print("kmovq %s, %s\t# spill", 1581 Matcher::regName[dst_first], 1582 Matcher::regName[src_first]); 1583 #endif 1584 } 1585 } 1586 return 0; 1587 } else if (dst_first_rc == rc_float) { 1588 assert(false, "Illegal spill"); 1589 return 0; 1590 } 1591 } 1592 1593 assert(0," foo "); 1594 Unimplemented(); 1595 return 0; 1596 } 1597 1598 #ifndef PRODUCT 1599 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const { 1600 implementation(NULL, ra_, false, st); 1601 } 1602 #endif 1603 1604 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1605 implementation(&cbuf, ra_, false, NULL); 1606 } 1607 1608 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { 1609 return MachNode::size(ra_); 1610 } 1611 1612 //============================================================================= 1613 #ifndef PRODUCT 1614 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const 1615 { 1616 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); 1617 int reg = ra_->get_reg_first(this); 1618 st->print("leaq %s, [rsp + #%d]\t# box lock", 1619 Matcher::regName[reg], offset); 1620 } 1621 #endif 1622 1623 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const 1624 { 1625 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); 1626 int reg = ra_->get_encode(this); 1627 if (offset >= 0x80) { 1628 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR); 1629 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset] 1630 emit_rm(cbuf, 0x2, reg & 7, 0x04); 1631 emit_rm(cbuf, 0x0, 0x04, RSP_enc); 1632 emit_d32(cbuf, offset); 1633 } else { 1634 emit_opcode(cbuf, reg < 8 ? 
Assembler::REX_W : Assembler::REX_WR); 1635 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset] 1636 emit_rm(cbuf, 0x1, reg & 7, 0x04); 1637 emit_rm(cbuf, 0x0, 0x04, RSP_enc); 1638 emit_d8(cbuf, offset); 1639 } 1640 } 1641 1642 uint BoxLockNode::size(PhaseRegAlloc *ra_) const 1643 { 1644 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); 1645 return (offset < 0x80) ? 5 : 8; // REX 1646 } 1647 1648 //============================================================================= 1649 #ifndef PRODUCT 1650 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const 1651 { 1652 if (UseCompressedClassPointers) { 1653 st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); 1654 st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1"); 1655 st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check"); 1656 } else { 1657 st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t" 1658 "# Inline cache check"); 1659 } 1660 st->print_cr("\tjne SharedRuntime::_ic_miss_stub"); 1661 st->print_cr("\tnop\t# nops to align entry point"); 1662 } 1663 #endif 1664 1665 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const 1666 { 1667 MacroAssembler masm(&cbuf); 1668 uint insts_size = cbuf.insts_size(); 1669 if (UseCompressedClassPointers) { 1670 masm.load_klass(rscratch1, j_rarg0, rscratch2); 1671 masm.cmpptr(rax, rscratch1); 1672 } else { 1673 masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); 1674 } 1675 1676 masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 1677 1678 /* WARNING these NOPs are critical so that verified entry point is properly 1679 4 bytes aligned for patching by NativeJump::patch_verified_entry() */ 1680 int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3); 1681 if (OptoBreakpoint) { 1682 // Leave space for int3 1683 nops_cnt -= 1; 1684 } 1685 nops_cnt &= 0x3; // Do not add nops if code is aligned. 1686 if (nops_cnt > 0) 1687 masm.nop(nops_cnt); 1688 } 1689 1690 uint MachUEPNode::size(PhaseRegAlloc* ra_) const 1691 { 1692 return MachNode::size(ra_); // too many variables; just compute it 1693 // the hard way 1694 } 1695 1696 1697 //============================================================================= 1698 1699 const bool Matcher::supports_vector_calling_convention(void) { 1700 if (EnableVectorSupport && UseVectorStubs) { 1701 return true; 1702 } 1703 return false; 1704 } 1705 1706 OptoRegPair Matcher::vector_return_value(uint ideal_reg) { 1707 assert(EnableVectorSupport && UseVectorStubs, "sanity"); 1708 int lo = XMM0_num; 1709 int hi = XMM0b_num; 1710 if (ideal_reg == Op_VecX) hi = XMM0d_num; 1711 else if (ideal_reg == Op_VecY) hi = XMM0h_num; 1712 else if (ideal_reg == Op_VecZ) hi = XMM0p_num; 1713 return OptoRegPair(hi, lo); 1714 } 1715 1716 // Is this branch offset short enough that a short branch can be used? 1717 // 1718 // NOTE: If the platform does not provide any short branch variants, then 1719 // this method should return false for offset 0. 1720 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { 1721 // The passed offset is relative to address of the branch. 1722 // On 86 a branch displacement is calculated relative to address 1723 // of a next instruction. 
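  // For example, a short Jcc is two bytes, so a branch at address A with an
  // 8-bit displacement d transfers control to A + 2 + d.  Subtracting br_size
  // below rebases the matcher's branch-relative offset to that
  // next-instruction-relative displacement, which must fit in [-128, 127]
  // (slightly less for rules that expand to more than one branch).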
1724 offset -= br_size; 1725 1726 // the short version of jmpConUCF2 contains multiple branches, 1727 // making the reach slightly less 1728 if (rule == jmpConUCF2_rule) 1729 return (-126 <= offset && offset <= 125); 1730 return (-128 <= offset && offset <= 127); 1731 } 1732 1733 // Return whether or not this register is ever used as an argument. 1734 // This function is used on startup to build the trampoline stubs in 1735 // generateOptoStub. Registers not mentioned will be killed by the VM 1736 // call in the trampoline, and arguments in those registers will not be 1737 // available to the callee. 1738 bool Matcher::can_be_java_arg(int reg) 1739 { 1740 return 1741 reg == RDI_num || reg == RDI_H_num || 1742 reg == RSI_num || reg == RSI_H_num || 1743 reg == RDX_num || reg == RDX_H_num || 1744 reg == RCX_num || reg == RCX_H_num || 1745 reg == R8_num || reg == R8_H_num || 1746 reg == R9_num || reg == R9_H_num || 1747 reg == R12_num || reg == R12_H_num || 1748 reg == XMM0_num || reg == XMM0b_num || 1749 reg == XMM1_num || reg == XMM1b_num || 1750 reg == XMM2_num || reg == XMM2b_num || 1751 reg == XMM3_num || reg == XMM3b_num || 1752 reg == XMM4_num || reg == XMM4b_num || 1753 reg == XMM5_num || reg == XMM5b_num || 1754 reg == XMM6_num || reg == XMM6b_num || 1755 reg == XMM7_num || reg == XMM7b_num; 1756 } 1757 1758 bool Matcher::is_spillable_arg(int reg) 1759 { 1760 return can_be_java_arg(reg); 1761 } 1762 1763 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { 1764 // In 64-bit mode, code which uses a multiply when the 1765 // divisor is constant is faster than the hardware 1766 // DIV instruction (it uses MulHiL). 1767 return false; 1768 } 1769 1770 // Register for DIVI projection of divmodI 1771 RegMask Matcher::divI_proj_mask() { 1772 return INT_RAX_REG_mask(); 1773 } 1774 1775 // Register for MODI projection of divmodI 1776 RegMask Matcher::modI_proj_mask() { 1777 return INT_RDX_REG_mask(); 1778 } 1779 1780 // Register for DIVL projection of divmodL 1781 RegMask Matcher::divL_proj_mask() { 1782 return LONG_RAX_REG_mask(); 1783 } 1784 1785 // Register for MODL projection of divmodL 1786 RegMask Matcher::modL_proj_mask() { 1787 return LONG_RDX_REG_mask(); 1788 } 1789 1790 // Register used to save SP on method handle invokes. Not used on x86_64. 1791 const RegMask Matcher::method_handle_invoke_SP_save_mask() { 1792 return NO_REG_mask(); 1793 } 1794 1795 %} 1796 1797 //----------ENCODING BLOCK----------------------------------------------------- 1798 // This block specifies the encoding classes used by the compiler to 1799 // output byte streams. Encoding classes are parameterized macros 1800 // used by Machine Instruction Nodes in order to generate the bit 1801 // encoding of the instruction. Operands specify their base encoding 1802 // interface with the interface keyword. There are currently 1803 // four supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, & 1804 // COND_INTER. REG_INTER causes an operand to generate a function 1805 // which returns its register number when queried. CONST_INTER causes 1806 // an operand to generate a function which returns the value of the 1807 // constant when queried. MEMORY_INTER causes an operand to generate 1808 // four functions which return the Base Register, the Index Register, 1809 // the Scale Value, and the Offset Value of the operand when queried.
1810 // COND_INTER causes an operand to generate six functions which return 1811 // the encoding code (ie - encoding bits for the instruction) 1812 // associated with each basic boolean condition for a conditional 1813 // instruction. 1814 // 1815 // Instructions specify two basic values for encoding. Again, a 1816 // function is available to check if the constant displacement is an 1817 // oop. They use the ins_encode keyword to specify their encoding 1818 // classes (which must be a sequence of enc_class names, and their 1819 // parameters, specified in the encoding block), and they use the 1820 // opcode keyword to specify, in order, their primary, secondary, and 1821 // tertiary opcode. Only the opcode sections which a particular 1822 // instruction needs for encoding need to be specified. 1823 encode %{ 1824 // Build emit functions for each basic byte or larger field in the 1825 // intel encoding scheme (opcode, rm, sib, immediate), and call them 1826 // from C++ code in the enc_class source block. Emit functions will 1827 // live in the main source block for now. In future, we can 1828 // generalize this by adding a syntax that specifies the sizes of 1829 // fields in an order, so that the adlc can build the emit functions 1830 // automagically 1831 1832 // Emit primary opcode 1833 enc_class OpcP 1834 %{ 1835 emit_opcode(cbuf, $primary); 1836 %} 1837 1838 // Emit secondary opcode 1839 enc_class OpcS 1840 %{ 1841 emit_opcode(cbuf, $secondary); 1842 %} 1843 1844 // Emit tertiary opcode 1845 enc_class OpcT 1846 %{ 1847 emit_opcode(cbuf, $tertiary); 1848 %} 1849 1850 // Emit opcode directly 1851 enc_class Opcode(immI d8) 1852 %{ 1853 emit_opcode(cbuf, $d8$$constant); 1854 %} 1855 1856 // Emit size prefix 1857 enc_class SizePrefix 1858 %{ 1859 emit_opcode(cbuf, 0x66); 1860 %} 1861 1862 enc_class reg(rRegI reg) 1863 %{ 1864 emit_rm(cbuf, 0x3, 0, $reg$$reg & 7); 1865 %} 1866 1867 enc_class reg_reg(rRegI dst, rRegI src) 1868 %{ 1869 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); 1870 %} 1871 1872 enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src) 1873 %{ 1874 emit_opcode(cbuf, $opcode$$constant); 1875 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); 1876 %} 1877 1878 enc_class cdql_enc(no_rax_rdx_RegI div) 1879 %{ 1880 // Full implementation of Java idiv and irem; checks for 1881 // special case as described in JVM spec., p.243 & p.271. 
1882 // 1883 // normal case special case 1884 // 1885 // input : rax: dividend min_int 1886 // reg: divisor -1 1887 // 1888 // output: rax: quotient (= rax idiv reg) min_int 1889 // rdx: remainder (= rax irem reg) 0 1890 // 1891 // Code sequnce: 1892 // 1893 // 0: 3d 00 00 00 80 cmp $0x80000000,%eax 1894 // 5: 75 07/08 jne e <normal> 1895 // 7: 33 d2 xor %edx,%edx 1896 // [div >= 8 -> offset + 1] 1897 // [REX_B] 1898 // 9: 83 f9 ff cmp $0xffffffffffffffff,$div 1899 // c: 74 03/04 je 11 <done> 1900 // 000000000000000e <normal>: 1901 // e: 99 cltd 1902 // [div >= 8 -> offset + 1] 1903 // [REX_B] 1904 // f: f7 f9 idiv $div 1905 // 0000000000000011 <done>: 1906 MacroAssembler _masm(&cbuf); 1907 Label normal; 1908 Label done; 1909 1910 // cmp $0x80000000,%eax 1911 __ cmp(as_Register(RAX_enc), 0x80000000); 1912 1913 // jne e <normal> 1914 __ jccb(Assembler::notEqual, normal); 1915 1916 // xor %edx,%edx 1917 __ xorl(as_Register(RDX_enc), as_Register(RDX_enc)); 1918 1919 // cmp $0xffffffffffffffff,%ecx 1920 __ cmpl($div$$Register, -1); 1921 1922 // je 11 <done> 1923 __ jccb(Assembler::equal, done); 1924 1925 // <normal> 1926 // cltd 1927 __ bind(normal); 1928 __ cdql(); 1929 1930 // idivl 1931 // <done> 1932 __ idivl($div$$Register); 1933 __ bind(done); 1934 %} 1935 1936 enc_class cdqq_enc(no_rax_rdx_RegL div) 1937 %{ 1938 // Full implementation of Java ldiv and lrem; checks for 1939 // special case as described in JVM spec., p.243 & p.271. 1940 // 1941 // normal case special case 1942 // 1943 // input : rax: dividend min_long 1944 // reg: divisor -1 1945 // 1946 // output: rax: quotient (= rax idiv reg) min_long 1947 // rdx: remainder (= rax irem reg) 0 1948 // 1949 // Code sequnce: 1950 // 1951 // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx 1952 // 7: 00 00 80 1953 // a: 48 39 d0 cmp %rdx,%rax 1954 // d: 75 08 jne 17 <normal> 1955 // f: 33 d2 xor %edx,%edx 1956 // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div 1957 // 15: 74 05 je 1c <done> 1958 // 0000000000000017 <normal>: 1959 // 17: 48 99 cqto 1960 // 19: 48 f7 f9 idiv $div 1961 // 000000000000001c <done>: 1962 MacroAssembler _masm(&cbuf); 1963 Label normal; 1964 Label done; 1965 1966 // mov $0x8000000000000000,%rdx 1967 __ mov64(as_Register(RDX_enc), 0x8000000000000000); 1968 1969 // cmp %rdx,%rax 1970 __ cmpq(as_Register(RAX_enc), as_Register(RDX_enc)); 1971 1972 // jne 17 <normal> 1973 __ jccb(Assembler::notEqual, normal); 1974 1975 // xor %edx,%edx 1976 __ xorl(as_Register(RDX_enc), as_Register(RDX_enc)); 1977 1978 // cmp $0xffffffffffffffff,$div 1979 __ cmpq($div$$Register, -1); 1980 1981 // je 1e <done> 1982 __ jccb(Assembler::equal, done); 1983 1984 // <normal> 1985 // cqto 1986 __ bind(normal); 1987 __ cdqq(); 1988 1989 // idivq (note: must be emitted by the user of this rule) 1990 // <done> 1991 __ idivq($div$$Register); 1992 __ bind(done); 1993 %} 1994 1995 // Opcde enc_class for 8/32 bit immediate instructions with sign-extension 1996 enc_class OpcSE(immI imm) 1997 %{ 1998 // Emit primary opcode and set sign-extend bit 1999 // Check for 8-bit immediate, and set sign extend bit in opcode 2000 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2001 emit_opcode(cbuf, $primary | 0x02); 2002 } else { 2003 // 32-bit immediate 2004 emit_opcode(cbuf, $primary); 2005 } 2006 %} 2007 2008 enc_class OpcSErm(rRegI dst, immI imm) 2009 %{ 2010 // OpcSEr/m 2011 int dstenc = $dst$$reg; 2012 if (dstenc >= 8) { 2013 emit_opcode(cbuf, Assembler::REX_B); 2014 dstenc -= 8; 2015 } 2016 // Emit primary opcode and set sign-extend bit 2017 // 
Check for 8-bit immediate, and set sign extend bit in opcode 2018 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2019 emit_opcode(cbuf, $primary | 0x02); 2020 } else { 2021 // 32-bit immediate 2022 emit_opcode(cbuf, $primary); 2023 } 2024 // Emit r/m byte with secondary opcode, after primary opcode. 2025 emit_rm(cbuf, 0x3, $secondary, dstenc); 2026 %} 2027 2028 enc_class OpcSErm_wide(rRegL dst, immI imm) 2029 %{ 2030 // OpcSEr/m 2031 int dstenc = $dst$$reg; 2032 if (dstenc < 8) { 2033 emit_opcode(cbuf, Assembler::REX_W); 2034 } else { 2035 emit_opcode(cbuf, Assembler::REX_WB); 2036 dstenc -= 8; 2037 } 2038 // Emit primary opcode and set sign-extend bit 2039 // Check for 8-bit immediate, and set sign extend bit in opcode 2040 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2041 emit_opcode(cbuf, $primary | 0x02); 2042 } else { 2043 // 32-bit immediate 2044 emit_opcode(cbuf, $primary); 2045 } 2046 // Emit r/m byte with secondary opcode, after primary opcode. 2047 emit_rm(cbuf, 0x3, $secondary, dstenc); 2048 %} 2049 2050 enc_class Con8or32(immI imm) 2051 %{ 2052 // Check for 8-bit immediate, and set sign extend bit in opcode 2053 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2054 $$$emit8$imm$$constant; 2055 } else { 2056 // 32-bit immediate 2057 $$$emit32$imm$$constant; 2058 } 2059 %} 2060 2061 enc_class opc2_reg(rRegI dst) 2062 %{ 2063 // BSWAP 2064 emit_cc(cbuf, $secondary, $dst$$reg); 2065 %} 2066 2067 enc_class opc3_reg(rRegI dst) 2068 %{ 2069 // BSWAP 2070 emit_cc(cbuf, $tertiary, $dst$$reg); 2071 %} 2072 2073 enc_class reg_opc(rRegI div) 2074 %{ 2075 // INC, DEC, IDIV, IMOD, JMP indirect, ... 2076 emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7); 2077 %} 2078 2079 enc_class enc_cmov(cmpOp cop) 2080 %{ 2081 // CMOV 2082 $$$emit8$primary; 2083 emit_cc(cbuf, $secondary, $cop$$cmpcode); 2084 %} 2085 2086 enc_class enc_PartialSubtypeCheck() 2087 %{ 2088 Register Rrdi = as_Register(RDI_enc); // result register 2089 Register Rrax = as_Register(RAX_enc); // super class 2090 Register Rrcx = as_Register(RCX_enc); // killed 2091 Register Rrsi = as_Register(RSI_enc); // sub class 2092 Label miss; 2093 const bool set_cond_codes = true; 2094 2095 MacroAssembler _masm(&cbuf); 2096 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi, 2097 NULL, &miss, 2098 /*set_cond_codes:*/ true); 2099 if ($primary) { 2100 __ xorptr(Rrdi, Rrdi); 2101 } 2102 __ bind(miss); 2103 %} 2104 2105 enc_class clear_avx %{ 2106 debug_only(int off0 = cbuf.insts_size()); 2107 if (generate_vzeroupper(Compile::current())) { 2108 // Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty 2109 // Clear upper bits of YMM registers when current compiled code uses 2110 // wide vectors to avoid AVX <-> SSE transition penalty during call. 2111 MacroAssembler _masm(&cbuf); 2112 __ vzeroupper(); 2113 } 2114 debug_only(int off1 = cbuf.insts_size()); 2115 assert(off1 - off0 == clear_avx_size(), "correct size prediction"); 2116 %} 2117 2118 enc_class Java_To_Runtime(method meth) %{ 2119 // No relocation needed 2120 MacroAssembler _masm(&cbuf); 2121 __ mov64(r10, (int64_t) $meth$$method); 2122 __ call(r10); 2123 %} 2124 2125 enc_class Java_To_Interpreter(method meth) 2126 %{ 2127 // CALL Java_To_Interpreter 2128 // This is the instruction starting address for relocation info. 
2129 cbuf.set_insts_mark(); 2130 $$$emit8$primary; 2131 // CALL directly to the runtime 2132 emit_d32_reloc(cbuf, 2133 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2134 runtime_call_Relocation::spec(), 2135 RELOC_DISP32); 2136 %} 2137 2138 enc_class Java_Static_Call(method meth) 2139 %{ 2140 // JAVA STATIC CALL 2141 // CALL to fixup routine. Fixup routine uses ScopeDesc info to 2142 // determine who we intended to call. 2143 cbuf.set_insts_mark(); 2144 $$$emit8$primary; 2145 2146 if (!_method) { 2147 emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2148 runtime_call_Relocation::spec(), 2149 RELOC_DISP32); 2150 } else { 2151 int method_index = resolved_method_index(cbuf); 2152 RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index) 2153 : static_call_Relocation::spec(method_index); 2154 emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2155 rspec, RELOC_DISP32); 2156 // Emit stubs for static call. 2157 address mark = cbuf.insts_mark(); 2158 address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark); 2159 if (stub == NULL) { 2160 ciEnv::current()->record_failure("CodeCache is full"); 2161 return; 2162 } 2163 } 2164 %} 2165 2166 enc_class Java_Dynamic_Call(method meth) %{ 2167 MacroAssembler _masm(&cbuf); 2168 __ ic_call((address)$meth$$method, resolved_method_index(cbuf)); 2169 %} 2170 2171 enc_class Java_Compiled_Call(method meth) 2172 %{ 2173 // JAVA COMPILED CALL 2174 int disp = in_bytes(Method:: from_compiled_offset()); 2175 2176 // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!! 2177 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small"); 2178 2179 // callq *disp(%rax) 2180 cbuf.set_insts_mark(); 2181 $$$emit8$primary; 2182 if (disp < 0x80) { 2183 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte 2184 emit_d8(cbuf, disp); // Displacement 2185 } else { 2186 emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte 2187 emit_d32(cbuf, disp); // Displacement 2188 } 2189 %} 2190 2191 enc_class reg_opc_imm(rRegI dst, immI8 shift) 2192 %{ 2193 // SAL, SAR, SHR 2194 int dstenc = $dst$$reg; 2195 if (dstenc >= 8) { 2196 emit_opcode(cbuf, Assembler::REX_B); 2197 dstenc -= 8; 2198 } 2199 $$$emit8$primary; 2200 emit_rm(cbuf, 0x3, $secondary, dstenc); 2201 $$$emit8$shift$$constant; 2202 %} 2203 2204 enc_class reg_opc_imm_wide(rRegL dst, immI8 shift) 2205 %{ 2206 // SAL, SAR, SHR 2207 int dstenc = $dst$$reg; 2208 if (dstenc < 8) { 2209 emit_opcode(cbuf, Assembler::REX_W); 2210 } else { 2211 emit_opcode(cbuf, Assembler::REX_WB); 2212 dstenc -= 8; 2213 } 2214 $$$emit8$primary; 2215 emit_rm(cbuf, 0x3, $secondary, dstenc); 2216 $$$emit8$shift$$constant; 2217 %} 2218 2219 enc_class load_immI(rRegI dst, immI src) 2220 %{ 2221 int dstenc = $dst$$reg; 2222 if (dstenc >= 8) { 2223 emit_opcode(cbuf, Assembler::REX_B); 2224 dstenc -= 8; 2225 } 2226 emit_opcode(cbuf, 0xB8 | dstenc); 2227 $$$emit32$src$$constant; 2228 %} 2229 2230 enc_class load_immL(rRegL dst, immL src) 2231 %{ 2232 int dstenc = $dst$$reg; 2233 if (dstenc < 8) { 2234 emit_opcode(cbuf, Assembler::REX_W); 2235 } else { 2236 emit_opcode(cbuf, Assembler::REX_WB); 2237 dstenc -= 8; 2238 } 2239 emit_opcode(cbuf, 0xB8 | dstenc); 2240 emit_d64(cbuf, $src$$constant); 2241 %} 2242 2243 enc_class load_immUL32(rRegL dst, immUL32 src) 2244 %{ 2245 // same as load_immI, but this time we care about zeroes in the high word 2246 int dstenc = $dst$$reg; 2247 if (dstenc >= 8) { 2248 emit_opcode(cbuf, Assembler::REX_B); 2249 
dstenc -= 8; 2250 } 2251 emit_opcode(cbuf, 0xB8 | dstenc); 2252 $$$emit32$src$$constant; 2253 %} 2254 2255 enc_class load_immL32(rRegL dst, immL32 src) 2256 %{ 2257 int dstenc = $dst$$reg; 2258 if (dstenc < 8) { 2259 emit_opcode(cbuf, Assembler::REX_W); 2260 } else { 2261 emit_opcode(cbuf, Assembler::REX_WB); 2262 dstenc -= 8; 2263 } 2264 emit_opcode(cbuf, 0xC7); 2265 emit_rm(cbuf, 0x03, 0x00, dstenc); 2266 $$$emit32$src$$constant; 2267 %} 2268 2269 enc_class load_immP31(rRegP dst, immP32 src) 2270 %{ 2271 // same as load_immI, but this time we care about zeroes in the high word 2272 int dstenc = $dst$$reg; 2273 if (dstenc >= 8) { 2274 emit_opcode(cbuf, Assembler::REX_B); 2275 dstenc -= 8; 2276 } 2277 emit_opcode(cbuf, 0xB8 | dstenc); 2278 $$$emit32$src$$constant; 2279 %} 2280 2281 enc_class load_immP(rRegP dst, immP src) 2282 %{ 2283 int dstenc = $dst$$reg; 2284 if (dstenc < 8) { 2285 emit_opcode(cbuf, Assembler::REX_W); 2286 } else { 2287 emit_opcode(cbuf, Assembler::REX_WB); 2288 dstenc -= 8; 2289 } 2290 emit_opcode(cbuf, 0xB8 | dstenc); 2291 // This next line should be generated from ADLC 2292 if ($src->constant_reloc() != relocInfo::none) { 2293 emit_d64_reloc(cbuf, $src$$constant, $src->constant_reloc(), RELOC_IMM64); 2294 } else { 2295 emit_d64(cbuf, $src$$constant); 2296 } 2297 %} 2298 2299 enc_class Con32(immI src) 2300 %{ 2301 // Output immediate 2302 $$$emit32$src$$constant; 2303 %} 2304 2305 enc_class Con32F_as_bits(immF src) 2306 %{ 2307 // Output Float immediate bits 2308 jfloat jf = $src$$constant; 2309 jint jf_as_bits = jint_cast(jf); 2310 emit_d32(cbuf, jf_as_bits); 2311 %} 2312 2313 enc_class Con16(immI src) 2314 %{ 2315 // Output immediate 2316 $$$emit16$src$$constant; 2317 %} 2318 2319 // How is this different from Con32??? XXX 2320 enc_class Con_d32(immI src) 2321 %{ 2322 emit_d32(cbuf,$src$$constant); 2323 %} 2324 2325 enc_class conmemref (rRegP t1) %{ // Con32(storeImmI) 2326 // Output immediate memory reference 2327 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 ); 2328 emit_d32(cbuf, 0x00); 2329 %} 2330 2331 enc_class lock_prefix() 2332 %{ 2333 emit_opcode(cbuf, 0xF0); // lock 2334 %} 2335 2336 enc_class REX_mem(memory mem) 2337 %{ 2338 if ($mem$$base >= 8) { 2339 if ($mem$$index < 8) { 2340 emit_opcode(cbuf, Assembler::REX_B); 2341 } else { 2342 emit_opcode(cbuf, Assembler::REX_XB); 2343 } 2344 } else { 2345 if ($mem$$index >= 8) { 2346 emit_opcode(cbuf, Assembler::REX_X); 2347 } 2348 } 2349 %} 2350 2351 enc_class REX_mem_wide(memory mem) 2352 %{ 2353 if ($mem$$base >= 8) { 2354 if ($mem$$index < 8) { 2355 emit_opcode(cbuf, Assembler::REX_WB); 2356 } else { 2357 emit_opcode(cbuf, Assembler::REX_WXB); 2358 } 2359 } else { 2360 if ($mem$$index < 8) { 2361 emit_opcode(cbuf, Assembler::REX_W); 2362 } else { 2363 emit_opcode(cbuf, Assembler::REX_WX); 2364 } 2365 } 2366 %} 2367 2368 // for byte regs 2369 enc_class REX_breg(rRegI reg) 2370 %{ 2371 if ($reg$$reg >= 4) { 2372 emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B); 2373 } 2374 %} 2375 2376 // for byte regs 2377 enc_class REX_reg_breg(rRegI dst, rRegI src) 2378 %{ 2379 if ($dst$$reg < 8) { 2380 if ($src$$reg >= 4) { 2381 emit_opcode(cbuf, $src$$reg < 8 ? 
Assembler::REX : Assembler::REX_B); 2382 } 2383 } else { 2384 if ($src$$reg < 8) { 2385 emit_opcode(cbuf, Assembler::REX_R); 2386 } else { 2387 emit_opcode(cbuf, Assembler::REX_RB); 2388 } 2389 } 2390 %} 2391 2392 // for byte regs 2393 enc_class REX_breg_mem(rRegI reg, memory mem) 2394 %{ 2395 if ($reg$$reg < 8) { 2396 if ($mem$$base < 8) { 2397 if ($mem$$index >= 8) { 2398 emit_opcode(cbuf, Assembler::REX_X); 2399 } else if ($reg$$reg >= 4) { 2400 emit_opcode(cbuf, Assembler::REX); 2401 } 2402 } else { 2403 if ($mem$$index < 8) { 2404 emit_opcode(cbuf, Assembler::REX_B); 2405 } else { 2406 emit_opcode(cbuf, Assembler::REX_XB); 2407 } 2408 } 2409 } else { 2410 if ($mem$$base < 8) { 2411 if ($mem$$index < 8) { 2412 emit_opcode(cbuf, Assembler::REX_R); 2413 } else { 2414 emit_opcode(cbuf, Assembler::REX_RX); 2415 } 2416 } else { 2417 if ($mem$$index < 8) { 2418 emit_opcode(cbuf, Assembler::REX_RB); 2419 } else { 2420 emit_opcode(cbuf, Assembler::REX_RXB); 2421 } 2422 } 2423 } 2424 %} 2425 2426 enc_class REX_reg(rRegI reg) 2427 %{ 2428 if ($reg$$reg >= 8) { 2429 emit_opcode(cbuf, Assembler::REX_B); 2430 } 2431 %} 2432 2433 enc_class REX_reg_wide(rRegI reg) 2434 %{ 2435 if ($reg$$reg < 8) { 2436 emit_opcode(cbuf, Assembler::REX_W); 2437 } else { 2438 emit_opcode(cbuf, Assembler::REX_WB); 2439 } 2440 %} 2441 2442 enc_class REX_reg_reg(rRegI dst, rRegI src) 2443 %{ 2444 if ($dst$$reg < 8) { 2445 if ($src$$reg >= 8) { 2446 emit_opcode(cbuf, Assembler::REX_B); 2447 } 2448 } else { 2449 if ($src$$reg < 8) { 2450 emit_opcode(cbuf, Assembler::REX_R); 2451 } else { 2452 emit_opcode(cbuf, Assembler::REX_RB); 2453 } 2454 } 2455 %} 2456 2457 enc_class REX_reg_reg_wide(rRegI dst, rRegI src) 2458 %{ 2459 if ($dst$$reg < 8) { 2460 if ($src$$reg < 8) { 2461 emit_opcode(cbuf, Assembler::REX_W); 2462 } else { 2463 emit_opcode(cbuf, Assembler::REX_WB); 2464 } 2465 } else { 2466 if ($src$$reg < 8) { 2467 emit_opcode(cbuf, Assembler::REX_WR); 2468 } else { 2469 emit_opcode(cbuf, Assembler::REX_WRB); 2470 } 2471 } 2472 %} 2473 2474 enc_class REX_reg_mem(rRegI reg, memory mem) 2475 %{ 2476 if ($reg$$reg < 8) { 2477 if ($mem$$base < 8) { 2478 if ($mem$$index >= 8) { 2479 emit_opcode(cbuf, Assembler::REX_X); 2480 } 2481 } else { 2482 if ($mem$$index < 8) { 2483 emit_opcode(cbuf, Assembler::REX_B); 2484 } else { 2485 emit_opcode(cbuf, Assembler::REX_XB); 2486 } 2487 } 2488 } else { 2489 if ($mem$$base < 8) { 2490 if ($mem$$index < 8) { 2491 emit_opcode(cbuf, Assembler::REX_R); 2492 } else { 2493 emit_opcode(cbuf, Assembler::REX_RX); 2494 } 2495 } else { 2496 if ($mem$$index < 8) { 2497 emit_opcode(cbuf, Assembler::REX_RB); 2498 } else { 2499 emit_opcode(cbuf, Assembler::REX_RXB); 2500 } 2501 } 2502 } 2503 %} 2504 2505 enc_class REX_reg_mem_wide(rRegL reg, memory mem) 2506 %{ 2507 if ($reg$$reg < 8) { 2508 if ($mem$$base < 8) { 2509 if ($mem$$index < 8) { 2510 emit_opcode(cbuf, Assembler::REX_W); 2511 } else { 2512 emit_opcode(cbuf, Assembler::REX_WX); 2513 } 2514 } else { 2515 if ($mem$$index < 8) { 2516 emit_opcode(cbuf, Assembler::REX_WB); 2517 } else { 2518 emit_opcode(cbuf, Assembler::REX_WXB); 2519 } 2520 } 2521 } else { 2522 if ($mem$$base < 8) { 2523 if ($mem$$index < 8) { 2524 emit_opcode(cbuf, Assembler::REX_WR); 2525 } else { 2526 emit_opcode(cbuf, Assembler::REX_WRX); 2527 } 2528 } else { 2529 if ($mem$$index < 8) { 2530 emit_opcode(cbuf, Assembler::REX_WRB); 2531 } else { 2532 emit_opcode(cbuf, Assembler::REX_WRXB); 2533 } 2534 } 2535 } 2536 %} 2537 2538 enc_class reg_mem(rRegI ereg, memory mem) 2539 %{ 
2540 // High registers are handled in encode_RegMem 2541 int reg = $ereg$$reg; 2542 int base = $mem$$base; 2543 int index = $mem$$index; 2544 int scale = $mem$$scale; 2545 int disp = $mem$$disp; 2546 relocInfo::relocType disp_reloc = $mem->disp_reloc(); 2547 2548 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_reloc); 2549 %} 2550 2551 enc_class RM_opc_mem(immI rm_opcode, memory mem) 2552 %{ 2553 int rm_byte_opcode = $rm_opcode$$constant; 2554 2555 // High registers are handled in encode_RegMem 2556 int base = $mem$$base; 2557 int index = $mem$$index; 2558 int scale = $mem$$scale; 2559 int displace = $mem$$disp; 2560 2561 relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when 2562 // working with static 2563 // globals 2564 encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, 2565 disp_reloc); 2566 %} 2567 2568 enc_class reg_lea(rRegI dst, rRegI src0, immI src1) 2569 %{ 2570 int reg_encoding = $dst$$reg; 2571 int base = $src0$$reg; // 0xFFFFFFFF indicates no base 2572 int index = 0x04; // 0x04 indicates no index 2573 int scale = 0x00; // 0x00 indicates no scale 2574 int displace = $src1$$constant; // 0x00 indicates no displacement 2575 relocInfo::relocType disp_reloc = relocInfo::none; 2576 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, 2577 disp_reloc); 2578 %} 2579 2580 enc_class neg_reg(rRegI dst) 2581 %{ 2582 int dstenc = $dst$$reg; 2583 if (dstenc >= 8) { 2584 emit_opcode(cbuf, Assembler::REX_B); 2585 dstenc -= 8; 2586 } 2587 // NEG $dst 2588 emit_opcode(cbuf, 0xF7); 2589 emit_rm(cbuf, 0x3, 0x03, dstenc); 2590 %} 2591 2592 enc_class neg_reg_wide(rRegI dst) 2593 %{ 2594 int dstenc = $dst$$reg; 2595 if (dstenc < 8) { 2596 emit_opcode(cbuf, Assembler::REX_W); 2597 } else { 2598 emit_opcode(cbuf, Assembler::REX_WB); 2599 dstenc -= 8; 2600 } 2601 // NEG $dst 2602 emit_opcode(cbuf, 0xF7); 2603 emit_rm(cbuf, 0x3, 0x03, dstenc); 2604 %} 2605 2606 enc_class setLT_reg(rRegI dst) 2607 %{ 2608 int dstenc = $dst$$reg; 2609 if (dstenc >= 8) { 2610 emit_opcode(cbuf, Assembler::REX_B); 2611 dstenc -= 8; 2612 } else if (dstenc >= 4) { 2613 emit_opcode(cbuf, Assembler::REX); 2614 } 2615 // SETLT $dst 2616 emit_opcode(cbuf, 0x0F); 2617 emit_opcode(cbuf, 0x9C); 2618 emit_rm(cbuf, 0x3, 0x0, dstenc); 2619 %} 2620 2621 enc_class setNZ_reg(rRegI dst) 2622 %{ 2623 int dstenc = $dst$$reg; 2624 if (dstenc >= 8) { 2625 emit_opcode(cbuf, Assembler::REX_B); 2626 dstenc -= 8; 2627 } else if (dstenc >= 4) { 2628 emit_opcode(cbuf, Assembler::REX); 2629 } 2630 // SETNZ $dst 2631 emit_opcode(cbuf, 0x0F); 2632 emit_opcode(cbuf, 0x95); 2633 emit_rm(cbuf, 0x3, 0x0, dstenc); 2634 %} 2635 2636 2637 // Compare the longs and set -1, 0, or 1 into dst 2638 enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst) 2639 %{ 2640 int src1enc = $src1$$reg; 2641 int src2enc = $src2$$reg; 2642 int dstenc = $dst$$reg; 2643 2644 // cmpq $src1, $src2 2645 if (src1enc < 8) { 2646 if (src2enc < 8) { 2647 emit_opcode(cbuf, Assembler::REX_W); 2648 } else { 2649 emit_opcode(cbuf, Assembler::REX_WB); 2650 } 2651 } else { 2652 if (src2enc < 8) { 2653 emit_opcode(cbuf, Assembler::REX_WR); 2654 } else { 2655 emit_opcode(cbuf, Assembler::REX_WRB); 2656 } 2657 } 2658 emit_opcode(cbuf, 0x3B); 2659 emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7); 2660 2661 // movl $dst, -1 2662 if (dstenc >= 8) { 2663 emit_opcode(cbuf, Assembler::REX_B); 2664 } 2665 emit_opcode(cbuf, 0xB8 | (dstenc & 7)); 2666 emit_d32(cbuf, -1); 2667 2668 // jl,s done 2669 emit_opcode(cbuf, 0x7C); 2670 emit_d8(cbuf, dstenc < 4 ?
0x06 : 0x08); 2671 2672 // setne $dst 2673 if (dstenc >= 4) { 2674 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B); 2675 } 2676 emit_opcode(cbuf, 0x0F); 2677 emit_opcode(cbuf, 0x95); 2678 emit_opcode(cbuf, 0xC0 | (dstenc & 7)); 2679 2680 // movzbl $dst, $dst 2681 if (dstenc >= 4) { 2682 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB); 2683 } 2684 emit_opcode(cbuf, 0x0F); 2685 emit_opcode(cbuf, 0xB6); 2686 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7); 2687 %} 2688 2689 enc_class Push_ResultXD(regD dst) %{ 2690 MacroAssembler _masm(&cbuf); 2691 __ fstp_d(Address(rsp, 0)); 2692 __ movdbl($dst$$XMMRegister, Address(rsp, 0)); 2693 __ addptr(rsp, 8); 2694 %} 2695 2696 enc_class Push_SrcXD(regD src) %{ 2697 MacroAssembler _masm(&cbuf); 2698 __ subptr(rsp, 8); 2699 __ movdbl(Address(rsp, 0), $src$$XMMRegister); 2700 __ fld_d(Address(rsp, 0)); 2701 %} 2702 2703 2704 enc_class enc_rethrow() 2705 %{ 2706 cbuf.set_insts_mark(); 2707 emit_opcode(cbuf, 0xE9); // jmp entry 2708 emit_d32_reloc(cbuf, 2709 (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4), 2710 runtime_call_Relocation::spec(), 2711 RELOC_DISP32); 2712 %} 2713 2714 %} 2715 2716 2717 2718 //----------FRAME-------------------------------------------------------------- 2719 // Definition of frame structure and management information. 2720 // 2721 // S T A C K L A Y O U T Allocators stack-slot number 2722 // | (to get allocators register number 2723 // G Owned by | | v add OptoReg::stack0()) 2724 // r CALLER | | 2725 // o | +--------+ pad to even-align allocators stack-slot 2726 // w V | pad0 | numbers; owned by CALLER 2727 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 2728 // h ^ | in | 5 2729 // | | args | 4 Holes in incoming args owned by SELF 2730 // | | | | 3 2731 // | | +--------+ 2732 // V | | old out| Empty on Intel, window on Sparc 2733 // | old |preserve| Must be even aligned. 2734 // | SP-+--------+----> Matcher::_old_SP, even aligned 2735 // | | in | 3 area for Intel ret address 2736 // Owned by |preserve| Empty on Sparc. 2737 // SELF +--------+ 2738 // | | pad2 | 2 pad to align old SP 2739 // | +--------+ 1 2740 // | | locks | 0 2741 // | +--------+----> OptoReg::stack0(), even aligned 2742 // | | pad1 | 11 pad to align new SP 2743 // | +--------+ 2744 // | | | 10 2745 // | | spills | 9 spills 2746 // V | | 8 (pad0 slot for callee) 2747 // -----------+--------+----> Matcher::_out_arg_limit, unaligned 2748 // ^ | out | 7 2749 // | | args | 6 Holes in outgoing args owned by CALLEE 2750 // Owned by +--------+ 2751 // CALLEE | new out| 6 Empty on Intel, window on Sparc 2752 // | new |preserve| Must be even-aligned. 2753 // | SP-+--------+----> Matcher::_new_SP, even aligned 2754 // | | | 2755 // 2756 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is 2757 // known from SELF's arguments and the Java calling convention. 2758 // Region 6-7 is determined per call site. 2759 // Note 2: If the calling convention leaves holes in the incoming argument 2760 // area, those holes are owned by SELF. Holes in the outgoing area 2761 // are owned by the CALLEE. Holes should not be nessecary in the 2762 // incoming area, as the Java calling convention is completely under 2763 // the control of the AD file. Doubles can be sorted and packed to 2764 // avoid holes. Holes in the outgoing arguments may be nessecary for 2765 // varargs C calling conventions. 2766 // Note 3: Region 0-3 is even aligned, with pad2 as needed. 
Region 3-5 is 2767 // even aligned with pad0 as needed. 2768 // Region 6 is even aligned. Region 6-7 is NOT even aligned; 2769 // region 6-11 is even aligned; it may be padded out more so that 2770 // the region from SP to FP meets the minimum stack alignment. 2771 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack 2772 // alignment. Region 11, pad1, may be dynamically extended so that 2773 // SP meets the minimum alignment. 2774 2775 frame 2776 %{ 2777 // These three registers define part of the calling convention 2778 // between compiled code and the interpreter. 2779 inline_cache_reg(RAX); // Inline Cache Register 2780 2781 // Optional: name the operand used by cisc-spilling to access 2782 // [stack_pointer + offset] 2783 cisc_spilling_operand_name(indOffset32); 2784 2785 // Number of stack slots consumed by locking an object 2786 sync_stack_slots(2); 2787 2788 // Compiled code's Frame Pointer 2789 frame_pointer(RSP); 2790 2791 // Interpreter stores its frame pointer in a register which is 2792 // stored to the stack by I2CAdaptors. 2793 // I2CAdaptors convert from interpreted java to compiled java. 2794 interpreter_frame_pointer(RBP); 2795 2796 // Stack alignment requirement 2797 stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) 2798 2799 // Number of outgoing stack slots killed above the out_preserve_stack_slots 2800 // for calls to C. Supports the var-args backing area for register parms. 2801 varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt); 2802 2803 // The after-PROLOG location of the return address. Location of 2804 // return address specifies a type (REG or STACK) and a number 2805 // representing the register number (i.e. - use a register name) or 2806 // stack slot. 2807 // Ret Addr is on stack in slot 0 if no locks or verification or alignment. 2808 // Otherwise, it is above the locks and verification slot and alignment word 2809 return_addr(STACK - 2 + 2810 align_up((Compile::current()->in_preserve_stack_slots() + 2811 Compile::current()->fixed_slots()), 2812 stack_alignment_in_slots())); 2813 2814 // Location of compiled Java return values. Same as C for now. 2815 return_value 2816 %{ 2817 assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, 2818 "only return normal values"); 2819 2820 static const int lo[Op_RegL + 1] = { 2821 0, 2822 0, 2823 RAX_num, // Op_RegN 2824 RAX_num, // Op_RegI 2825 RAX_num, // Op_RegP 2826 XMM0_num, // Op_RegF 2827 XMM0_num, // Op_RegD 2828 RAX_num // Op_RegL 2829 }; 2830 static const int hi[Op_RegL + 1] = { 2831 0, 2832 0, 2833 OptoReg::Bad, // Op_RegN 2834 OptoReg::Bad, // Op_RegI 2835 RAX_H_num, // Op_RegP 2836 OptoReg::Bad, // Op_RegF 2837 XMM0b_num, // Op_RegD 2838 RAX_H_num // Op_RegL 2839 }; 2840 // Excluded flags and vector registers. 2841 assert(ARRAY_SIZE(hi) == _last_machine_leaf - 8, "missing type"); 2842 return OptoRegPair(hi[ideal_reg], lo[ideal_reg]); 2843 %} 2844 %} 2845 2846 //----------ATTRIBUTES--------------------------------------------------------- 2847 //----------Operand Attributes------------------------------------------------- 2848 op_attrib op_cost(0); // Required cost attribute 2849 2850 //----------Instruction Attributes--------------------------------------------- 2851 ins_attrib ins_cost(100); // Required cost attribute 2852 ins_attrib ins_size(8); // Required size attribute (in bits) 2853 ins_attrib ins_short_branch(0); // Required flag: is this instruction 2854 // a non-matching short branch variant 2855 // of some long branch? 
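// Illustrative sketch (commented out; a hypothetical rule, not one defined in
// this file): instruct definitions later in the file override these attribute
// defaults per rule.  The sketch only shows where the attributes appear:
//
//   instruct addI_rReg_example(rRegI dst, rRegI src, rFlagsReg cr)
//   %{
//     match(Set dst (AddI dst src));
//     effect(KILL cr);
//     ins_cost(150);                      // overrides the default cost of 100
//     format %{ "addl    $dst, $src" %}
//     ins_encode %{ __ addl($dst$$Register, $src$$Register); %}
//     ins_pipe(ialu_reg_reg);
//   %}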
2856 ins_attrib ins_alignment(1); // Required alignment attribute (must 2857 // be a power of 2) specifies the 2858 // alignment that some part of the 2859 // instruction (not necessarily the 2860 // start) requires. If > 1, a 2861 // compute_padding() function must be 2862 // provided for the instruction 2863 2864 //----------OPERANDS----------------------------------------------------------- 2865 // Operand definitions must precede instruction definitions for correct parsing 2866 // in the ADLC because operands constitute user defined types which are used in 2867 // instruction definitions. 2868 2869 //----------Simple Operands---------------------------------------------------- 2870 // Immediate Operands 2871 // Integer Immediate 2872 operand immI() 2873 %{ 2874 match(ConI); 2875 2876 op_cost(10); 2877 format %{ %} 2878 interface(CONST_INTER); 2879 %} 2880 2881 // Constant for test vs zero 2882 operand immI_0() 2883 %{ 2884 predicate(n->get_int() == 0); 2885 match(ConI); 2886 2887 op_cost(0); 2888 format %{ %} 2889 interface(CONST_INTER); 2890 %} 2891 2892 // Constant for increment 2893 operand immI_1() 2894 %{ 2895 predicate(n->get_int() == 1); 2896 match(ConI); 2897 2898 op_cost(0); 2899 format %{ %} 2900 interface(CONST_INTER); 2901 %} 2902 2903 // Constant for decrement 2904 operand immI_M1() 2905 %{ 2906 predicate(n->get_int() == -1); 2907 match(ConI); 2908 2909 op_cost(0); 2910 format %{ %} 2911 interface(CONST_INTER); 2912 %} 2913 2914 operand immI_2() 2915 %{ 2916 predicate(n->get_int() == 2); 2917 match(ConI); 2918 2919 op_cost(0); 2920 format %{ %} 2921 interface(CONST_INTER); 2922 %} 2923 2924 operand immI_4() 2925 %{ 2926 predicate(n->get_int() == 4); 2927 match(ConI); 2928 2929 op_cost(0); 2930 format %{ %} 2931 interface(CONST_INTER); 2932 %} 2933 2934 operand immI_8() 2935 %{ 2936 predicate(n->get_int() == 8); 2937 match(ConI); 2938 2939 op_cost(0); 2940 format %{ %} 2941 interface(CONST_INTER); 2942 %} 2943 2944 // Valid scale values for addressing modes 2945 operand immI2() 2946 %{ 2947 predicate(0 <= n->get_int() && (n->get_int() <= 3)); 2948 match(ConI); 2949 2950 format %{ %} 2951 interface(CONST_INTER); 2952 %} 2953 2954 operand immU7() 2955 %{ 2956 predicate((0 <= n->get_int()) && (n->get_int() <= 0x7F)); 2957 match(ConI); 2958 2959 op_cost(5); 2960 format %{ %} 2961 interface(CONST_INTER); 2962 %} 2963 2964 operand immI8() 2965 %{ 2966 predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80)); 2967 match(ConI); 2968 2969 op_cost(5); 2970 format %{ %} 2971 interface(CONST_INTER); 2972 %} 2973 2974 operand immU8() 2975 %{ 2976 predicate((0 <= n->get_int()) && (n->get_int() <= 255)); 2977 match(ConI); 2978 2979 op_cost(5); 2980 format %{ %} 2981 interface(CONST_INTER); 2982 %} 2983 2984 operand immI16() 2985 %{ 2986 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767)); 2987 match(ConI); 2988 2989 op_cost(10); 2990 format %{ %} 2991 interface(CONST_INTER); 2992 %} 2993 2994 // Int Immediate non-negative 2995 operand immU31() 2996 %{ 2997 predicate(n->get_int() >= 0); 2998 match(ConI); 2999 3000 op_cost(0); 3001 format %{ %} 3002 interface(CONST_INTER); 3003 %} 3004 3005 // Constant for long shifts 3006 operand immI_32() 3007 %{ 3008 predicate( n->get_int() == 32 ); 3009 match(ConI); 3010 3011 op_cost(0); 3012 format %{ %} 3013 interface(CONST_INTER); 3014 %} 3015 3016 // Constant for long shifts 3017 operand immI_64() 3018 %{ 3019 predicate( n->get_int() == 64 ); 3020 match(ConI); 3021 3022 op_cost(0); 3023 format %{ %} 3024 interface(CONST_INTER); 3025 %} 
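// Illustrative sketch (commented out; a hypothetical operand, not used by any
// rule in this file): the predicate/match/interface pattern above extends to
// any compile-time test on the constant node, e.g. an operand that accepts
// only shift counts between 1 and 4:
//
//   operand immI_1_to_4()
//   %{
//     predicate(n->get_int() >= 1 && n->get_int() <= 4);
//     match(ConI);
//
//     op_cost(0);
//     format %{ %}
//     interface(CONST_INTER);
//   %}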
3026 3027 // Pointer Immediate 3028 operand immP() 3029 %{ 3030 match(ConP); 3031 3032 op_cost(10); 3033 format %{ %} 3034 interface(CONST_INTER); 3035 %} 3036 3037 // NULL Pointer Immediate 3038 operand immP0() 3039 %{ 3040 predicate(n->get_ptr() == 0); 3041 match(ConP); 3042 3043 op_cost(5); 3044 format %{ %} 3045 interface(CONST_INTER); 3046 %} 3047 3048 // Pointer Immediate 3049 operand immN() %{ 3050 match(ConN); 3051 3052 op_cost(10); 3053 format %{ %} 3054 interface(CONST_INTER); 3055 %} 3056 3057 operand immNKlass() %{ 3058 match(ConNKlass); 3059 3060 op_cost(10); 3061 format %{ %} 3062 interface(CONST_INTER); 3063 %} 3064 3065 // NULL Pointer Immediate 3066 operand immN0() %{ 3067 predicate(n->get_narrowcon() == 0); 3068 match(ConN); 3069 3070 op_cost(5); 3071 format %{ %} 3072 interface(CONST_INTER); 3073 %} 3074 3075 operand immP31() 3076 %{ 3077 predicate(n->as_Type()->type()->reloc() == relocInfo::none 3078 && (n->get_ptr() >> 31) == 0); 3079 match(ConP); 3080 3081 op_cost(5); 3082 format %{ %} 3083 interface(CONST_INTER); 3084 %} 3085 3086 3087 // Long Immediate 3088 operand immL() 3089 %{ 3090 match(ConL); 3091 3092 op_cost(20); 3093 format %{ %} 3094 interface(CONST_INTER); 3095 %} 3096 3097 // Long Immediate 8-bit 3098 operand immL8() 3099 %{ 3100 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L); 3101 match(ConL); 3102 3103 op_cost(5); 3104 format %{ %} 3105 interface(CONST_INTER); 3106 %} 3107 3108 // Long Immediate 32-bit unsigned 3109 operand immUL32() 3110 %{ 3111 predicate(n->get_long() == (unsigned int) (n->get_long())); 3112 match(ConL); 3113 3114 op_cost(10); 3115 format %{ %} 3116 interface(CONST_INTER); 3117 %} 3118 3119 // Long Immediate 32-bit signed 3120 operand immL32() 3121 %{ 3122 predicate(n->get_long() == (int) (n->get_long())); 3123 match(ConL); 3124 3125 op_cost(15); 3126 format %{ %} 3127 interface(CONST_INTER); 3128 %} 3129 3130 operand immL_Pow2() 3131 %{ 3132 predicate(is_power_of_2((julong)n->get_long())); 3133 match(ConL); 3134 3135 op_cost(15); 3136 format %{ %} 3137 interface(CONST_INTER); 3138 %} 3139 3140 operand immL_NotPow2() 3141 %{ 3142 predicate(is_power_of_2((julong)~n->get_long())); 3143 match(ConL); 3144 3145 op_cost(15); 3146 format %{ %} 3147 interface(CONST_INTER); 3148 %} 3149 3150 // Long Immediate zero 3151 operand immL0() 3152 %{ 3153 predicate(n->get_long() == 0L); 3154 match(ConL); 3155 3156 op_cost(10); 3157 format %{ %} 3158 interface(CONST_INTER); 3159 %} 3160 3161 // Constant for increment 3162 operand immL1() 3163 %{ 3164 predicate(n->get_long() == 1); 3165 match(ConL); 3166 3167 format %{ %} 3168 interface(CONST_INTER); 3169 %} 3170 3171 // Constant for decrement 3172 operand immL_M1() 3173 %{ 3174 predicate(n->get_long() == -1); 3175 match(ConL); 3176 3177 format %{ %} 3178 interface(CONST_INTER); 3179 %} 3180 3181 // Long Immediate: the value 10 3182 operand immL10() 3183 %{ 3184 predicate(n->get_long() == 10); 3185 match(ConL); 3186 3187 format %{ %} 3188 interface(CONST_INTER); 3189 %} 3190 3191 // Long immediate from 0 to 127. 3192 // Used for a shorter form of long mul by 10. 
3193 operand immL_127() 3194 %{ 3195 predicate(0 <= n->get_long() && n->get_long() < 0x80); 3196 match(ConL); 3197 3198 op_cost(10); 3199 format %{ %} 3200 interface(CONST_INTER); 3201 %} 3202 3203 // Long Immediate: low 32-bit mask 3204 operand immL_32bits() 3205 %{ 3206 predicate(n->get_long() == 0xFFFFFFFFL); 3207 match(ConL); 3208 op_cost(20); 3209 3210 format %{ %} 3211 interface(CONST_INTER); 3212 %} 3213 3214 // Int Immediate: 2^n-1, postive 3215 operand immI_Pow2M1() 3216 %{ 3217 predicate((n->get_int() > 0) 3218 && is_power_of_2(n->get_int() + 1)); 3219 match(ConI); 3220 3221 op_cost(20); 3222 format %{ %} 3223 interface(CONST_INTER); 3224 %} 3225 3226 // Float Immediate zero 3227 operand immF0() 3228 %{ 3229 predicate(jint_cast(n->getf()) == 0); 3230 match(ConF); 3231 3232 op_cost(5); 3233 format %{ %} 3234 interface(CONST_INTER); 3235 %} 3236 3237 // Float Immediate 3238 operand immF() 3239 %{ 3240 match(ConF); 3241 3242 op_cost(15); 3243 format %{ %} 3244 interface(CONST_INTER); 3245 %} 3246 3247 // Double Immediate zero 3248 operand immD0() 3249 %{ 3250 predicate(jlong_cast(n->getd()) == 0); 3251 match(ConD); 3252 3253 op_cost(5); 3254 format %{ %} 3255 interface(CONST_INTER); 3256 %} 3257 3258 // Double Immediate 3259 operand immD() 3260 %{ 3261 match(ConD); 3262 3263 op_cost(15); 3264 format %{ %} 3265 interface(CONST_INTER); 3266 %} 3267 3268 // Immediates for special shifts (sign extend) 3269 3270 // Constants for increment 3271 operand immI_16() 3272 %{ 3273 predicate(n->get_int() == 16); 3274 match(ConI); 3275 3276 format %{ %} 3277 interface(CONST_INTER); 3278 %} 3279 3280 operand immI_24() 3281 %{ 3282 predicate(n->get_int() == 24); 3283 match(ConI); 3284 3285 format %{ %} 3286 interface(CONST_INTER); 3287 %} 3288 3289 // Constant for byte-wide masking 3290 operand immI_255() 3291 %{ 3292 predicate(n->get_int() == 255); 3293 match(ConI); 3294 3295 format %{ %} 3296 interface(CONST_INTER); 3297 %} 3298 3299 // Constant for short-wide masking 3300 operand immI_65535() 3301 %{ 3302 predicate(n->get_int() == 65535); 3303 match(ConI); 3304 3305 format %{ %} 3306 interface(CONST_INTER); 3307 %} 3308 3309 // Constant for byte-wide masking 3310 operand immL_255() 3311 %{ 3312 predicate(n->get_long() == 255); 3313 match(ConL); 3314 3315 format %{ %} 3316 interface(CONST_INTER); 3317 %} 3318 3319 // Constant for short-wide masking 3320 operand immL_65535() 3321 %{ 3322 predicate(n->get_long() == 65535); 3323 match(ConL); 3324 3325 format %{ %} 3326 interface(CONST_INTER); 3327 %} 3328 3329 operand kReg() 3330 %{ 3331 constraint(ALLOC_IN_RC(vectmask_reg)); 3332 match(RegVectMask); 3333 format %{%} 3334 interface(REG_INTER); 3335 %} 3336 3337 operand kReg_K1() 3338 %{ 3339 constraint(ALLOC_IN_RC(vectmask_reg_K1)); 3340 match(RegVectMask); 3341 format %{%} 3342 interface(REG_INTER); 3343 %} 3344 3345 operand kReg_K2() 3346 %{ 3347 constraint(ALLOC_IN_RC(vectmask_reg_K2)); 3348 match(RegVectMask); 3349 format %{%} 3350 interface(REG_INTER); 3351 %} 3352 3353 // Special Registers 3354 operand kReg_K3() 3355 %{ 3356 constraint(ALLOC_IN_RC(vectmask_reg_K3)); 3357 match(RegVectMask); 3358 format %{%} 3359 interface(REG_INTER); 3360 %} 3361 3362 operand kReg_K4() 3363 %{ 3364 constraint(ALLOC_IN_RC(vectmask_reg_K4)); 3365 match(RegVectMask); 3366 format %{%} 3367 interface(REG_INTER); 3368 %} 3369 3370 operand kReg_K5() 3371 %{ 3372 constraint(ALLOC_IN_RC(vectmask_reg_K5)); 3373 match(RegVectMask); 3374 format %{%} 3375 interface(REG_INTER); 3376 %} 3377 3378 operand kReg_K6() 3379 %{ 
3380 constraint(ALLOC_IN_RC(vectmask_reg_K6)); 3381 match(RegVectMask); 3382 format %{%} 3383 interface(REG_INTER); 3384 %} 3385 3386 // Special Registers 3387 operand kReg_K7() 3388 %{ 3389 constraint(ALLOC_IN_RC(vectmask_reg_K7)); 3390 match(RegVectMask); 3391 format %{%} 3392 interface(REG_INTER); 3393 %} 3394 3395 // Register Operands 3396 // Integer Register 3397 operand rRegI() 3398 %{ 3399 constraint(ALLOC_IN_RC(int_reg)); 3400 match(RegI); 3401 3402 match(rax_RegI); 3403 match(rbx_RegI); 3404 match(rcx_RegI); 3405 match(rdx_RegI); 3406 match(rdi_RegI); 3407 3408 format %{ %} 3409 interface(REG_INTER); 3410 %} 3411 3412 // Special Registers 3413 operand rax_RegI() 3414 %{ 3415 constraint(ALLOC_IN_RC(int_rax_reg)); 3416 match(RegI); 3417 match(rRegI); 3418 3419 format %{ "RAX" %} 3420 interface(REG_INTER); 3421 %} 3422 3423 // Special Registers 3424 operand rbx_RegI() 3425 %{ 3426 constraint(ALLOC_IN_RC(int_rbx_reg)); 3427 match(RegI); 3428 match(rRegI); 3429 3430 format %{ "RBX" %} 3431 interface(REG_INTER); 3432 %} 3433 3434 operand rcx_RegI() 3435 %{ 3436 constraint(ALLOC_IN_RC(int_rcx_reg)); 3437 match(RegI); 3438 match(rRegI); 3439 3440 format %{ "RCX" %} 3441 interface(REG_INTER); 3442 %} 3443 3444 operand rdx_RegI() 3445 %{ 3446 constraint(ALLOC_IN_RC(int_rdx_reg)); 3447 match(RegI); 3448 match(rRegI); 3449 3450 format %{ "RDX" %} 3451 interface(REG_INTER); 3452 %} 3453 3454 operand rdi_RegI() 3455 %{ 3456 constraint(ALLOC_IN_RC(int_rdi_reg)); 3457 match(RegI); 3458 match(rRegI); 3459 3460 format %{ "RDI" %} 3461 interface(REG_INTER); 3462 %} 3463 3464 operand no_rax_rdx_RegI() 3465 %{ 3466 constraint(ALLOC_IN_RC(int_no_rax_rdx_reg)); 3467 match(RegI); 3468 match(rbx_RegI); 3469 match(rcx_RegI); 3470 match(rdi_RegI); 3471 3472 format %{ %} 3473 interface(REG_INTER); 3474 %} 3475 3476 // Pointer Register 3477 operand any_RegP() 3478 %{ 3479 constraint(ALLOC_IN_RC(any_reg)); 3480 match(RegP); 3481 match(rax_RegP); 3482 match(rbx_RegP); 3483 match(rdi_RegP); 3484 match(rsi_RegP); 3485 match(rbp_RegP); 3486 match(r15_RegP); 3487 match(rRegP); 3488 3489 format %{ %} 3490 interface(REG_INTER); 3491 %} 3492 3493 operand rRegP() 3494 %{ 3495 constraint(ALLOC_IN_RC(ptr_reg)); 3496 match(RegP); 3497 match(rax_RegP); 3498 match(rbx_RegP); 3499 match(rdi_RegP); 3500 match(rsi_RegP); 3501 match(rbp_RegP); // See Q&A below about 3502 match(r15_RegP); // r15_RegP and rbp_RegP. 3503 3504 format %{ %} 3505 interface(REG_INTER); 3506 %} 3507 3508 operand rRegN() %{ 3509 constraint(ALLOC_IN_RC(int_reg)); 3510 match(RegN); 3511 3512 format %{ %} 3513 interface(REG_INTER); 3514 %} 3515 3516 // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP? 3517 // Answer: Operand match rules govern the DFA as it processes instruction inputs. 3518 // It's fine for an instruction input that expects rRegP to match a r15_RegP. 3519 // The output of an instruction is controlled by the allocator, which respects 3520 // register class masks, not match rules. Unless an instruction mentions 3521 // r15_RegP or any_RegP explicitly as its output, r15 will not be considered 3522 // by the allocator as an input. 3523 // The same logic applies to rbp_RegP being a match for rRegP: If PreserveFramePointer==true, 3524 // the RBP is used as a proper frame pointer and is not included in ptr_reg. As a 3525 // result, RBP is not included in the output of the instruction either. 
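// Illustrative sketch (commented out; a hypothetical rule, not part of this
// file): an instruction may name r15_RegP explicitly as an input so that the
// matcher can bind the thread register, while its rRegP result is still drawn
// only from ptr_reg:
//
//   instruct loadThreadFieldExample(rRegP dst, r15_RegP thread, immL32 off)
//   %{
//     match(Set dst (LoadP (AddP thread off)));
//     format %{ "movq    $dst, [$thread + $off]\t# hypothetical TLS-relative load" %}
//     ins_encode %{ __ movq($dst$$Register, Address(r15, (int) $off$$constant)); %}
//     ins_pipe(ialu_reg_mem);
//   %}
//
// Here r15 can feed the address input, but the allocator never hands r15 (or
// rbp, when PreserveFramePointer is set) back as dst, because dst's register
// class is ptr_reg.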
3526 3527 operand no_rax_RegP() 3528 %{ 3529 constraint(ALLOC_IN_RC(ptr_no_rax_reg)); 3530 match(RegP); 3531 match(rbx_RegP); 3532 match(rsi_RegP); 3533 match(rdi_RegP); 3534 3535 format %{ %} 3536 interface(REG_INTER); 3537 %} 3538 3539 // This operand is not allowed to use RBP even if 3540 // RBP is not used to hold the frame pointer. 3541 operand no_rbp_RegP() 3542 %{ 3543 constraint(ALLOC_IN_RC(ptr_reg_no_rbp)); 3544 match(RegP); 3545 match(rbx_RegP); 3546 match(rsi_RegP); 3547 match(rdi_RegP); 3548 3549 format %{ %} 3550 interface(REG_INTER); 3551 %} 3552 3553 operand no_rax_rbx_RegP() 3554 %{ 3555 constraint(ALLOC_IN_RC(ptr_no_rax_rbx_reg)); 3556 match(RegP); 3557 match(rsi_RegP); 3558 match(rdi_RegP); 3559 3560 format %{ %} 3561 interface(REG_INTER); 3562 %} 3563 3564 // Special Registers 3565 // Return a pointer value 3566 operand rax_RegP() 3567 %{ 3568 constraint(ALLOC_IN_RC(ptr_rax_reg)); 3569 match(RegP); 3570 match(rRegP); 3571 3572 format %{ %} 3573 interface(REG_INTER); 3574 %} 3575 3576 // Special Registers 3577 // Return a compressed pointer value 3578 operand rax_RegN() 3579 %{ 3580 constraint(ALLOC_IN_RC(int_rax_reg)); 3581 match(RegN); 3582 match(rRegN); 3583 3584 format %{ %} 3585 interface(REG_INTER); 3586 %} 3587 3588 // Used in AtomicAdd 3589 operand rbx_RegP() 3590 %{ 3591 constraint(ALLOC_IN_RC(ptr_rbx_reg)); 3592 match(RegP); 3593 match(rRegP); 3594 3595 format %{ %} 3596 interface(REG_INTER); 3597 %} 3598 3599 operand rsi_RegP() 3600 %{ 3601 constraint(ALLOC_IN_RC(ptr_rsi_reg)); 3602 match(RegP); 3603 match(rRegP); 3604 3605 format %{ %} 3606 interface(REG_INTER); 3607 %} 3608 3609 operand rbp_RegP() 3610 %{ 3611 constraint(ALLOC_IN_RC(ptr_rbp_reg)); 3612 match(RegP); 3613 match(rRegP); 3614 3615 format %{ %} 3616 interface(REG_INTER); 3617 %} 3618 3619 // Used in rep stosq 3620 operand rdi_RegP() 3621 %{ 3622 constraint(ALLOC_IN_RC(ptr_rdi_reg)); 3623 match(RegP); 3624 match(rRegP); 3625 3626 format %{ %} 3627 interface(REG_INTER); 3628 %} 3629 3630 operand r15_RegP() 3631 %{ 3632 constraint(ALLOC_IN_RC(ptr_r15_reg)); 3633 match(RegP); 3634 match(rRegP); 3635 3636 format %{ %} 3637 interface(REG_INTER); 3638 %} 3639 3640 operand rRegL() 3641 %{ 3642 constraint(ALLOC_IN_RC(long_reg)); 3643 match(RegL); 3644 match(rax_RegL); 3645 match(rdx_RegL); 3646 3647 format %{ %} 3648 interface(REG_INTER); 3649 %} 3650 3651 // Special Registers 3652 operand no_rax_rdx_RegL() 3653 %{ 3654 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg)); 3655 match(RegL); 3656 match(rRegL); 3657 3658 format %{ %} 3659 interface(REG_INTER); 3660 %} 3661 3662 operand no_rax_RegL() 3663 %{ 3664 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg)); 3665 match(RegL); 3666 match(rRegL); 3667 match(rdx_RegL); 3668 3669 format %{ %} 3670 interface(REG_INTER); 3671 %} 3672 3673 operand rax_RegL() 3674 %{ 3675 constraint(ALLOC_IN_RC(long_rax_reg)); 3676 match(RegL); 3677 match(rRegL); 3678 3679 format %{ "RAX" %} 3680 interface(REG_INTER); 3681 %} 3682 3683 operand rcx_RegL() 3684 %{ 3685 constraint(ALLOC_IN_RC(long_rcx_reg)); 3686 match(RegL); 3687 match(rRegL); 3688 3689 format %{ %} 3690 interface(REG_INTER); 3691 %} 3692 3693 operand rdx_RegL() 3694 %{ 3695 constraint(ALLOC_IN_RC(long_rdx_reg)); 3696 match(RegL); 3697 match(rRegL); 3698 3699 format %{ %} 3700 interface(REG_INTER); 3701 %} 3702 3703 // Flags register, used as output of compare instructions 3704 operand rFlagsReg() 3705 %{ 3706 constraint(ALLOC_IN_RC(int_flags)); 3707 match(RegFlags); 3708 3709 format %{ "RFLAGS" %} 3710 interface(REG_INTER); 
3711 %} 3712 3713 // Flags register, used as output of FLOATING POINT compare instructions 3714 operand rFlagsRegU() 3715 %{ 3716 constraint(ALLOC_IN_RC(int_flags)); 3717 match(RegFlags); 3718 3719 format %{ "RFLAGS_U" %} 3720 interface(REG_INTER); 3721 %} 3722 3723 operand rFlagsRegUCF() %{ 3724 constraint(ALLOC_IN_RC(int_flags)); 3725 match(RegFlags); 3726 predicate(false); 3727 3728 format %{ "RFLAGS_U_CF" %} 3729 interface(REG_INTER); 3730 %} 3731 3732 // Float register operands 3733 operand regF() %{ 3734 constraint(ALLOC_IN_RC(float_reg)); 3735 match(RegF); 3736 3737 format %{ %} 3738 interface(REG_INTER); 3739 %} 3740 3741 // Float register operands 3742 operand legRegF() %{ 3743 constraint(ALLOC_IN_RC(float_reg_legacy)); 3744 match(RegF); 3745 3746 format %{ %} 3747 interface(REG_INTER); 3748 %} 3749 3750 // Float register operands 3751 operand vlRegF() %{ 3752 constraint(ALLOC_IN_RC(float_reg_vl)); 3753 match(RegF); 3754 3755 format %{ %} 3756 interface(REG_INTER); 3757 %} 3758 3759 // Double register operands 3760 operand regD() %{ 3761 constraint(ALLOC_IN_RC(double_reg)); 3762 match(RegD); 3763 3764 format %{ %} 3765 interface(REG_INTER); 3766 %} 3767 3768 // Double register operands 3769 operand legRegD() %{ 3770 constraint(ALLOC_IN_RC(double_reg_legacy)); 3771 match(RegD); 3772 3773 format %{ %} 3774 interface(REG_INTER); 3775 %} 3776 3777 // Double register operands 3778 operand vlRegD() %{ 3779 constraint(ALLOC_IN_RC(double_reg_vl)); 3780 match(RegD); 3781 3782 format %{ %} 3783 interface(REG_INTER); 3784 %} 3785 3786 //----------Memory Operands---------------------------------------------------- 3787 // Direct Memory Operand 3788 // operand direct(immP addr) 3789 // %{ 3790 // match(addr); 3791 3792 // format %{ "[$addr]" %} 3793 // interface(MEMORY_INTER) %{ 3794 // base(0xFFFFFFFF); 3795 // index(0x4); 3796 // scale(0x0); 3797 // disp($addr); 3798 // %} 3799 // %} 3800 3801 // Indirect Memory Operand 3802 operand indirect(any_RegP reg) 3803 %{ 3804 constraint(ALLOC_IN_RC(ptr_reg)); 3805 match(reg); 3806 3807 format %{ "[$reg]" %} 3808 interface(MEMORY_INTER) %{ 3809 base($reg); 3810 index(0x4); 3811 scale(0x0); 3812 disp(0x0); 3813 %} 3814 %} 3815 3816 // Indirect Memory Plus Short Offset Operand 3817 operand indOffset8(any_RegP reg, immL8 off) 3818 %{ 3819 constraint(ALLOC_IN_RC(ptr_reg)); 3820 match(AddP reg off); 3821 3822 format %{ "[$reg + $off (8-bit)]" %} 3823 interface(MEMORY_INTER) %{ 3824 base($reg); 3825 index(0x4); 3826 scale(0x0); 3827 disp($off); 3828 %} 3829 %} 3830 3831 // Indirect Memory Plus Long Offset Operand 3832 operand indOffset32(any_RegP reg, immL32 off) 3833 %{ 3834 constraint(ALLOC_IN_RC(ptr_reg)); 3835 match(AddP reg off); 3836 3837 format %{ "[$reg + $off (32-bit)]" %} 3838 interface(MEMORY_INTER) %{ 3839 base($reg); 3840 index(0x4); 3841 scale(0x0); 3842 disp($off); 3843 %} 3844 %} 3845 3846 // Indirect Memory Plus Index Register Plus Offset Operand 3847 operand indIndexOffset(any_RegP reg, rRegL lreg, immL32 off) 3848 %{ 3849 constraint(ALLOC_IN_RC(ptr_reg)); 3850 match(AddP (AddP reg lreg) off); 3851 3852 op_cost(10); 3853 format %{"[$reg + $off + $lreg]" %} 3854 interface(MEMORY_INTER) %{ 3855 base($reg); 3856 index($lreg); 3857 scale(0x0); 3858 disp($off); 3859 %} 3860 %} 3861 3862 // Indirect Memory Plus Index Register Plus Offset Operand 3863 operand indIndex(any_RegP reg, rRegL lreg) 3864 %{ 3865 constraint(ALLOC_IN_RC(ptr_reg)); 3866 match(AddP reg lreg); 3867 3868 op_cost(10); 3869 format %{"[$reg + $lreg]" %} 3870 
interface(MEMORY_INTER) %{ 3871 base($reg); 3872 index($lreg); 3873 scale(0x0); 3874 disp(0x0); 3875 %} 3876 %} 3877 3878 // Indirect Memory Times Scale Plus Index Register 3879 operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale) 3880 %{ 3881 constraint(ALLOC_IN_RC(ptr_reg)); 3882 match(AddP reg (LShiftL lreg scale)); 3883 3884 op_cost(10); 3885 format %{"[$reg + $lreg << $scale]" %} 3886 interface(MEMORY_INTER) %{ 3887 base($reg); 3888 index($lreg); 3889 scale($scale); 3890 disp(0x0); 3891 %} 3892 %} 3893 3894 operand indPosIndexScale(any_RegP reg, rRegI idx, immI2 scale) 3895 %{ 3896 constraint(ALLOC_IN_RC(ptr_reg)); 3897 predicate(n->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); 3898 match(AddP reg (LShiftL (ConvI2L idx) scale)); 3899 3900 op_cost(10); 3901 format %{"[$reg + pos $idx << $scale]" %} 3902 interface(MEMORY_INTER) %{ 3903 base($reg); 3904 index($idx); 3905 scale($scale); 3906 disp(0x0); 3907 %} 3908 %} 3909 3910 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand 3911 operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale) 3912 %{ 3913 constraint(ALLOC_IN_RC(ptr_reg)); 3914 match(AddP (AddP reg (LShiftL lreg scale)) off); 3915 3916 op_cost(10); 3917 format %{"[$reg + $off + $lreg << $scale]" %} 3918 interface(MEMORY_INTER) %{ 3919 base($reg); 3920 index($lreg); 3921 scale($scale); 3922 disp($off); 3923 %} 3924 %} 3925 3926 // Indirect Memory Plus Positive Index Register Plus Offset Operand 3927 operand indPosIndexOffset(any_RegP reg, immL32 off, rRegI idx) 3928 %{ 3929 constraint(ALLOC_IN_RC(ptr_reg)); 3930 predicate(n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0); 3931 match(AddP (AddP reg (ConvI2L idx)) off); 3932 3933 op_cost(10); 3934 format %{"[$reg + $off + $idx]" %} 3935 interface(MEMORY_INTER) %{ 3936 base($reg); 3937 index($idx); 3938 scale(0x0); 3939 disp($off); 3940 %} 3941 %} 3942 3943 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand 3944 operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale) 3945 %{ 3946 constraint(ALLOC_IN_RC(ptr_reg)); 3947 predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); 3948 match(AddP (AddP reg (LShiftL (ConvI2L idx) scale)) off); 3949 3950 op_cost(10); 3951 format %{"[$reg + $off + $idx << $scale]" %} 3952 interface(MEMORY_INTER) %{ 3953 base($reg); 3954 index($idx); 3955 scale($scale); 3956 disp($off); 3957 %} 3958 %} 3959 3960 // Indirect Narrow Oop Plus Offset Operand 3961 // Note: x86 architecture doesn't support "scale * index + offset" without a base 3962 // we can't free r12 even with CompressedOops::base() == NULL. 
3963 operand indCompressedOopOffset(rRegN reg, immL32 off) %{ 3964 predicate(UseCompressedOops && (CompressedOops::shift() == Address::times_8)); 3965 constraint(ALLOC_IN_RC(ptr_reg)); 3966 match(AddP (DecodeN reg) off); 3967 3968 op_cost(10); 3969 format %{"[R12 + $reg << 3 + $off] (compressed oop addressing)" %} 3970 interface(MEMORY_INTER) %{ 3971 base(0xc); // R12 3972 index($reg); 3973 scale(0x3); 3974 disp($off); 3975 %} 3976 %} 3977 3978 // Indirect Memory Operand 3979 operand indirectNarrow(rRegN reg) 3980 %{ 3981 predicate(CompressedOops::shift() == 0); 3982 constraint(ALLOC_IN_RC(ptr_reg)); 3983 match(DecodeN reg); 3984 3985 format %{ "[$reg]" %} 3986 interface(MEMORY_INTER) %{ 3987 base($reg); 3988 index(0x4); 3989 scale(0x0); 3990 disp(0x0); 3991 %} 3992 %} 3993 3994 // Indirect Memory Plus Short Offset Operand 3995 operand indOffset8Narrow(rRegN reg, immL8 off) 3996 %{ 3997 predicate(CompressedOops::shift() == 0); 3998 constraint(ALLOC_IN_RC(ptr_reg)); 3999 match(AddP (DecodeN reg) off); 4000 4001 format %{ "[$reg + $off (8-bit)]" %} 4002 interface(MEMORY_INTER) %{ 4003 base($reg); 4004 index(0x4); 4005 scale(0x0); 4006 disp($off); 4007 %} 4008 %} 4009 4010 // Indirect Memory Plus Long Offset Operand 4011 operand indOffset32Narrow(rRegN reg, immL32 off) 4012 %{ 4013 predicate(CompressedOops::shift() == 0); 4014 constraint(ALLOC_IN_RC(ptr_reg)); 4015 match(AddP (DecodeN reg) off); 4016 4017 format %{ "[$reg + $off (32-bit)]" %} 4018 interface(MEMORY_INTER) %{ 4019 base($reg); 4020 index(0x4); 4021 scale(0x0); 4022 disp($off); 4023 %} 4024 %} 4025 4026 // Indirect Memory Plus Index Register Plus Offset Operand 4027 operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off) 4028 %{ 4029 predicate(CompressedOops::shift() == 0); 4030 constraint(ALLOC_IN_RC(ptr_reg)); 4031 match(AddP (AddP (DecodeN reg) lreg) off); 4032 4033 op_cost(10); 4034 format %{"[$reg + $off + $lreg]" %} 4035 interface(MEMORY_INTER) %{ 4036 base($reg); 4037 index($lreg); 4038 scale(0x0); 4039 disp($off); 4040 %} 4041 %} 4042 4043 // Indirect Memory Plus Index Register Plus Offset Operand 4044 operand indIndexNarrow(rRegN reg, rRegL lreg) 4045 %{ 4046 predicate(CompressedOops::shift() == 0); 4047 constraint(ALLOC_IN_RC(ptr_reg)); 4048 match(AddP (DecodeN reg) lreg); 4049 4050 op_cost(10); 4051 format %{"[$reg + $lreg]" %} 4052 interface(MEMORY_INTER) %{ 4053 base($reg); 4054 index($lreg); 4055 scale(0x0); 4056 disp(0x0); 4057 %} 4058 %} 4059 4060 // Indirect Memory Times Scale Plus Index Register 4061 operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale) 4062 %{ 4063 predicate(CompressedOops::shift() == 0); 4064 constraint(ALLOC_IN_RC(ptr_reg)); 4065 match(AddP (DecodeN reg) (LShiftL lreg scale)); 4066 4067 op_cost(10); 4068 format %{"[$reg + $lreg << $scale]" %} 4069 interface(MEMORY_INTER) %{ 4070 base($reg); 4071 index($lreg); 4072 scale($scale); 4073 disp(0x0); 4074 %} 4075 %} 4076 4077 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand 4078 operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale) 4079 %{ 4080 predicate(CompressedOops::shift() == 0); 4081 constraint(ALLOC_IN_RC(ptr_reg)); 4082 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off); 4083 4084 op_cost(10); 4085 format %{"[$reg + $off + $lreg << $scale]" %} 4086 interface(MEMORY_INTER) %{ 4087 base($reg); 4088 index($lreg); 4089 scale($scale); 4090 disp($off); 4091 %} 4092 %} 4093 4094 // Indirect Memory Times Plus Positive Index Register Plus Offset Operand 4095 operand 
indPosIndexOffsetNarrow(rRegN reg, immL32 off, rRegI idx) 4096 %{ 4097 constraint(ALLOC_IN_RC(ptr_reg)); 4098 predicate(CompressedOops::shift() == 0 && n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0); 4099 match(AddP (AddP (DecodeN reg) (ConvI2L idx)) off); 4100 4101 op_cost(10); 4102 format %{"[$reg + $off + $idx]" %} 4103 interface(MEMORY_INTER) %{ 4104 base($reg); 4105 index($idx); 4106 scale(0x0); 4107 disp($off); 4108 %} 4109 %} 4110 4111 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand 4112 operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale) 4113 %{ 4114 constraint(ALLOC_IN_RC(ptr_reg)); 4115 predicate(CompressedOops::shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); 4116 match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off); 4117 4118 op_cost(10); 4119 format %{"[$reg + $off + $idx << $scale]" %} 4120 interface(MEMORY_INTER) %{ 4121 base($reg); 4122 index($idx); 4123 scale($scale); 4124 disp($off); 4125 %} 4126 %} 4127 4128 //----------Special Memory Operands-------------------------------------------- 4129 // Stack Slot Operand - This operand is used for loading and storing temporary 4130 // values on the stack where a match requires a value to 4131 // flow through memory. 4132 operand stackSlotP(sRegP reg) 4133 %{ 4134 constraint(ALLOC_IN_RC(stack_slots)); 4135 // No match rule because this operand is only generated in matching 4136 4137 format %{ "[$reg]" %} 4138 interface(MEMORY_INTER) %{ 4139 base(0x4); // RSP 4140 index(0x4); // No Index 4141 scale(0x0); // No Scale 4142 disp($reg); // Stack Offset 4143 %} 4144 %} 4145 4146 operand stackSlotI(sRegI reg) 4147 %{ 4148 constraint(ALLOC_IN_RC(stack_slots)); 4149 // No match rule because this operand is only generated in matching 4150 4151 format %{ "[$reg]" %} 4152 interface(MEMORY_INTER) %{ 4153 base(0x4); // RSP 4154 index(0x4); // No Index 4155 scale(0x0); // No Scale 4156 disp($reg); // Stack Offset 4157 %} 4158 %} 4159 4160 operand stackSlotF(sRegF reg) 4161 %{ 4162 constraint(ALLOC_IN_RC(stack_slots)); 4163 // No match rule because this operand is only generated in matching 4164 4165 format %{ "[$reg]" %} 4166 interface(MEMORY_INTER) %{ 4167 base(0x4); // RSP 4168 index(0x4); // No Index 4169 scale(0x0); // No Scale 4170 disp($reg); // Stack Offset 4171 %} 4172 %} 4173 4174 operand stackSlotD(sRegD reg) 4175 %{ 4176 constraint(ALLOC_IN_RC(stack_slots)); 4177 // No match rule because this operand is only generated in matching 4178 4179 format %{ "[$reg]" %} 4180 interface(MEMORY_INTER) %{ 4181 base(0x4); // RSP 4182 index(0x4); // No Index 4183 scale(0x0); // No Scale 4184 disp($reg); // Stack Offset 4185 %} 4186 %} 4187 operand stackSlotL(sRegL reg) 4188 %{ 4189 constraint(ALLOC_IN_RC(stack_slots)); 4190 // No match rule because this operand is only generated in matching 4191 4192 format %{ "[$reg]" %} 4193 interface(MEMORY_INTER) %{ 4194 base(0x4); // RSP 4195 index(0x4); // No Index 4196 scale(0x0); // No Scale 4197 disp($reg); // Stack Offset 4198 %} 4199 %} 4200 4201 //----------Conditional Branch Operands---------------------------------------- 4202 // Comparison Op - This is the operation of the comparison, and is limited to 4203 // the following set of codes: 4204 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) 4205 // 4206 // Other attributes of the comparison, such as unsignedness, are specified 4207 // by the comparison instruction that sets a condition code flags register. 
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0xC, "l");
    greater_equal(0xD, "ge");
    less_equal(0xE, "le");
    greater(0xF, "g");
    overflow(0x0, "o");
    no_overflow(0x1, "no");
  %}
%}

// Comparison Code, unsigned compare.  Used by FP also, with
// C2 (unordered) turned into GT or LT already.  The other bits
// C0 and C3 are turned into Carry & Zero flags.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
    overflow(0x0, "o");
    no_overflow(0x1, "no");
  %}
%}


// Floating comparisons that don't require any fixup for the unordered case
operand cmpOpUCF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
            n->as_Bool()->_test._test == BoolTest::ge ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);
  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
    overflow(0x0, "o");
    no_overflow(0x1, "no");
  %}
%}


// Floating comparisons that can be fixed up with extra conditional jumps
operand cmpOpUCF2() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq);
  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
    overflow(0x0, "o");
    no_overflow(0x1, "no");
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format.  The classic
// case of this is memory operands.

opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
               indIndexScale, indPosIndexScale, indIndexScaleOffset, indPosIndexOffset, indPosIndexScaleOffset,
               indCompressedOopOffset,
               indirectNarrow, indOffset8Narrow, indOffset32Narrow,
               indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
               indIndexScaleOffsetNarrow, indPosIndexOffsetNarrow, indPosIndexScaleOffsetNarrow);

//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  variable_size_instructions;        // Variable-size instructions
  max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
  instruction_unit_size = 1;         // Instruction sizes are measured in units of 1 byte
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// Generic P2/P3 pipeline
// 3 decoders, only D0 handles big operands; a "bundle" is the limit of
// 3 instructions decoded per cycle.
// 2 load/store ops per cycle, 1 branch, 1 FPU,
// 3 ALU ops, only ALU0 handles mul instructions.
resources( D0, D1, D2, DECODE = D0 | D1 | D2,
           MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
           BR, FPU,
           ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Generic P2/P3 pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Naming convention: ialu or fpu
// Then: _reg for a register operand
// Then: _reg if there is a 2nd register
// Then: _long if it's a pair of instructions implementing a long
// Then: _fat if it requires the big decoder
// Or:   _mem if it requires the big decoder and a memory unit.
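// Reading the pipeline classes below: each "name : Stage(use)" line records
// the stage in which an operand is read or written, and each resource line
// records which functional unit is busy in which stage.  A compound resource
// from the resources() declaration above, such as DECODE = D0 | D1 | D2,
// behaves like a bitmask: a class that asks for DECODE can be issued on any
// decoder, while one that asks for D0 needs the big decoder.  A hedged sketch
// of that mask behaviour in plain C++ (illustrative only, not how ADLC
// actually models resources):
//
//   #include <cstdint>
//
//   enum : uint32_t { D0 = 1u << 0, D1 = 1u << 1, D2 = 1u << 2 };
//   constexpr uint32_t DECODE = D0 | D1 | D2;
//
//   // A unit can serve a request if it is part of the requested mask.
//   bool can_issue_on(uint32_t requested, uint32_t unit) {
//     return (requested & unit) != 0;
//   }
//   // can_issue_on(DECODE, D2) -> true; can_issue_on(D0, D2) -> false.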
4352 4353 // Integer ALU reg operation 4354 pipe_class ialu_reg(rRegI dst) 4355 %{ 4356 single_instruction; 4357 dst : S4(write); 4358 dst : S3(read); 4359 DECODE : S0; // any decoder 4360 ALU : S3; // any alu 4361 %} 4362 4363 // Long ALU reg operation 4364 pipe_class ialu_reg_long(rRegL dst) 4365 %{ 4366 instruction_count(2); 4367 dst : S4(write); 4368 dst : S3(read); 4369 DECODE : S0(2); // any 2 decoders 4370 ALU : S3(2); // both alus 4371 %} 4372 4373 // Integer ALU reg operation using big decoder 4374 pipe_class ialu_reg_fat(rRegI dst) 4375 %{ 4376 single_instruction; 4377 dst : S4(write); 4378 dst : S3(read); 4379 D0 : S0; // big decoder only 4380 ALU : S3; // any alu 4381 %} 4382 4383 // Integer ALU reg-reg operation 4384 pipe_class ialu_reg_reg(rRegI dst, rRegI src) 4385 %{ 4386 single_instruction; 4387 dst : S4(write); 4388 src : S3(read); 4389 DECODE : S0; // any decoder 4390 ALU : S3; // any alu 4391 %} 4392 4393 // Integer ALU reg-reg operation 4394 pipe_class ialu_reg_reg_fat(rRegI dst, memory src) 4395 %{ 4396 single_instruction; 4397 dst : S4(write); 4398 src : S3(read); 4399 D0 : S0; // big decoder only 4400 ALU : S3; // any alu 4401 %} 4402 4403 // Integer ALU reg-mem operation 4404 pipe_class ialu_reg_mem(rRegI dst, memory mem) 4405 %{ 4406 single_instruction; 4407 dst : S5(write); 4408 mem : S3(read); 4409 D0 : S0; // big decoder only 4410 ALU : S4; // any alu 4411 MEM : S3; // any mem 4412 %} 4413 4414 // Integer mem operation (prefetch) 4415 pipe_class ialu_mem(memory mem) 4416 %{ 4417 single_instruction; 4418 mem : S3(read); 4419 D0 : S0; // big decoder only 4420 MEM : S3; // any mem 4421 %} 4422 4423 // Integer Store to Memory 4424 pipe_class ialu_mem_reg(memory mem, rRegI src) 4425 %{ 4426 single_instruction; 4427 mem : S3(read); 4428 src : S5(read); 4429 D0 : S0; // big decoder only 4430 ALU : S4; // any alu 4431 MEM : S3; 4432 %} 4433 4434 // // Long Store to Memory 4435 // pipe_class ialu_mem_long_reg(memory mem, rRegL src) 4436 // %{ 4437 // instruction_count(2); 4438 // mem : S3(read); 4439 // src : S5(read); 4440 // D0 : S0(2); // big decoder only; twice 4441 // ALU : S4(2); // any 2 alus 4442 // MEM : S3(2); // Both mems 4443 // %} 4444 4445 // Integer Store to Memory 4446 pipe_class ialu_mem_imm(memory mem) 4447 %{ 4448 single_instruction; 4449 mem : S3(read); 4450 D0 : S0; // big decoder only 4451 ALU : S4; // any alu 4452 MEM : S3; 4453 %} 4454 4455 // Integer ALU0 reg-reg operation 4456 pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src) 4457 %{ 4458 single_instruction; 4459 dst : S4(write); 4460 src : S3(read); 4461 D0 : S0; // Big decoder only 4462 ALU0 : S3; // only alu0 4463 %} 4464 4465 // Integer ALU0 reg-mem operation 4466 pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem) 4467 %{ 4468 single_instruction; 4469 dst : S5(write); 4470 mem : S3(read); 4471 D0 : S0; // big decoder only 4472 ALU0 : S4; // ALU0 only 4473 MEM : S3; // any mem 4474 %} 4475 4476 // Integer ALU reg-reg operation 4477 pipe_class ialu_cr_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2) 4478 %{ 4479 single_instruction; 4480 cr : S4(write); 4481 src1 : S3(read); 4482 src2 : S3(read); 4483 DECODE : S0; // any decoder 4484 ALU : S3; // any alu 4485 %} 4486 4487 // Integer ALU reg-imm operation 4488 pipe_class ialu_cr_reg_imm(rFlagsReg cr, rRegI src1) 4489 %{ 4490 single_instruction; 4491 cr : S4(write); 4492 src1 : S3(read); 4493 DECODE : S0; // any decoder 4494 ALU : S3; // any alu 4495 %} 4496 4497 // Integer ALU reg-mem operation 4498 pipe_class ialu_cr_reg_mem(rFlagsReg cr, rRegI 
src1, memory src2) 4499 %{ 4500 single_instruction; 4501 cr : S4(write); 4502 src1 : S3(read); 4503 src2 : S3(read); 4504 D0 : S0; // big decoder only 4505 ALU : S4; // any alu 4506 MEM : S3; 4507 %} 4508 4509 // Conditional move reg-reg 4510 pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y) 4511 %{ 4512 instruction_count(4); 4513 y : S4(read); 4514 q : S3(read); 4515 p : S3(read); 4516 DECODE : S0(4); // any decoder 4517 %} 4518 4519 // Conditional move reg-reg 4520 pipe_class pipe_cmov_reg( rRegI dst, rRegI src, rFlagsReg cr) 4521 %{ 4522 single_instruction; 4523 dst : S4(write); 4524 src : S3(read); 4525 cr : S3(read); 4526 DECODE : S0; // any decoder 4527 %} 4528 4529 // Conditional move reg-mem 4530 pipe_class pipe_cmov_mem( rFlagsReg cr, rRegI dst, memory src) 4531 %{ 4532 single_instruction; 4533 dst : S4(write); 4534 src : S3(read); 4535 cr : S3(read); 4536 DECODE : S0; // any decoder 4537 MEM : S3; 4538 %} 4539 4540 // Conditional move reg-reg long 4541 pipe_class pipe_cmov_reg_long( rFlagsReg cr, rRegL dst, rRegL src) 4542 %{ 4543 single_instruction; 4544 dst : S4(write); 4545 src : S3(read); 4546 cr : S3(read); 4547 DECODE : S0(2); // any 2 decoders 4548 %} 4549 4550 // XXX 4551 // // Conditional move double reg-reg 4552 // pipe_class pipe_cmovD_reg( rFlagsReg cr, regDPR1 dst, regD src) 4553 // %{ 4554 // single_instruction; 4555 // dst : S4(write); 4556 // src : S3(read); 4557 // cr : S3(read); 4558 // DECODE : S0; // any decoder 4559 // %} 4560 4561 // Float reg-reg operation 4562 pipe_class fpu_reg(regD dst) 4563 %{ 4564 instruction_count(2); 4565 dst : S3(read); 4566 DECODE : S0(2); // any 2 decoders 4567 FPU : S3; 4568 %} 4569 4570 // Float reg-reg operation 4571 pipe_class fpu_reg_reg(regD dst, regD src) 4572 %{ 4573 instruction_count(2); 4574 dst : S4(write); 4575 src : S3(read); 4576 DECODE : S0(2); // any 2 decoders 4577 FPU : S3; 4578 %} 4579 4580 // Float reg-reg operation 4581 pipe_class fpu_reg_reg_reg(regD dst, regD src1, regD src2) 4582 %{ 4583 instruction_count(3); 4584 dst : S4(write); 4585 src1 : S3(read); 4586 src2 : S3(read); 4587 DECODE : S0(3); // any 3 decoders 4588 FPU : S3(2); 4589 %} 4590 4591 // Float reg-reg operation 4592 pipe_class fpu_reg_reg_reg_reg(regD dst, regD src1, regD src2, regD src3) 4593 %{ 4594 instruction_count(4); 4595 dst : S4(write); 4596 src1 : S3(read); 4597 src2 : S3(read); 4598 src3 : S3(read); 4599 DECODE : S0(4); // any 3 decoders 4600 FPU : S3(2); 4601 %} 4602 4603 // Float reg-reg operation 4604 pipe_class fpu_reg_mem_reg_reg(regD dst, memory src1, regD src2, regD src3) 4605 %{ 4606 instruction_count(4); 4607 dst : S4(write); 4608 src1 : S3(read); 4609 src2 : S3(read); 4610 src3 : S3(read); 4611 DECODE : S1(3); // any 3 decoders 4612 D0 : S0; // Big decoder only 4613 FPU : S3(2); 4614 MEM : S3; 4615 %} 4616 4617 // Float reg-mem operation 4618 pipe_class fpu_reg_mem(regD dst, memory mem) 4619 %{ 4620 instruction_count(2); 4621 dst : S5(write); 4622 mem : S3(read); 4623 D0 : S0; // big decoder only 4624 DECODE : S1; // any decoder for FPU POP 4625 FPU : S4; 4626 MEM : S3; // any mem 4627 %} 4628 4629 // Float reg-mem operation 4630 pipe_class fpu_reg_reg_mem(regD dst, regD src1, memory mem) 4631 %{ 4632 instruction_count(3); 4633 dst : S5(write); 4634 src1 : S3(read); 4635 mem : S3(read); 4636 D0 : S0; // big decoder only 4637 DECODE : S1(2); // any decoder for FPU POP 4638 FPU : S4; 4639 MEM : S3; // any mem 4640 %} 4641 4642 // Float mem-reg operation 4643 pipe_class fpu_mem_reg(memory mem, regD src) 4644 %{ 4645 
instruction_count(2); 4646 src : S5(read); 4647 mem : S3(read); 4648 DECODE : S0; // any decoder for FPU PUSH 4649 D0 : S1; // big decoder only 4650 FPU : S4; 4651 MEM : S3; // any mem 4652 %} 4653 4654 pipe_class fpu_mem_reg_reg(memory mem, regD src1, regD src2) 4655 %{ 4656 instruction_count(3); 4657 src1 : S3(read); 4658 src2 : S3(read); 4659 mem : S3(read); 4660 DECODE : S0(2); // any decoder for FPU PUSH 4661 D0 : S1; // big decoder only 4662 FPU : S4; 4663 MEM : S3; // any mem 4664 %} 4665 4666 pipe_class fpu_mem_reg_mem(memory mem, regD src1, memory src2) 4667 %{ 4668 instruction_count(3); 4669 src1 : S3(read); 4670 src2 : S3(read); 4671 mem : S4(read); 4672 DECODE : S0; // any decoder for FPU PUSH 4673 D0 : S0(2); // big decoder only 4674 FPU : S4; 4675 MEM : S3(2); // any mem 4676 %} 4677 4678 pipe_class fpu_mem_mem(memory dst, memory src1) 4679 %{ 4680 instruction_count(2); 4681 src1 : S3(read); 4682 dst : S4(read); 4683 D0 : S0(2); // big decoder only 4684 MEM : S3(2); // any mem 4685 %} 4686 4687 pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2) 4688 %{ 4689 instruction_count(3); 4690 src1 : S3(read); 4691 src2 : S3(read); 4692 dst : S4(read); 4693 D0 : S0(3); // big decoder only 4694 FPU : S4; 4695 MEM : S3(3); // any mem 4696 %} 4697 4698 pipe_class fpu_mem_reg_con(memory mem, regD src1) 4699 %{ 4700 instruction_count(3); 4701 src1 : S4(read); 4702 mem : S4(read); 4703 DECODE : S0; // any decoder for FPU PUSH 4704 D0 : S0(2); // big decoder only 4705 FPU : S4; 4706 MEM : S3(2); // any mem 4707 %} 4708 4709 // Float load constant 4710 pipe_class fpu_reg_con(regD dst) 4711 %{ 4712 instruction_count(2); 4713 dst : S5(write); 4714 D0 : S0; // big decoder only for the load 4715 DECODE : S1; // any decoder for FPU POP 4716 FPU : S4; 4717 MEM : S3; // any mem 4718 %} 4719 4720 // Float load constant 4721 pipe_class fpu_reg_reg_con(regD dst, regD src) 4722 %{ 4723 instruction_count(3); 4724 dst : S5(write); 4725 src : S3(read); 4726 D0 : S0; // big decoder only for the load 4727 DECODE : S1(2); // any decoder for FPU POP 4728 FPU : S4; 4729 MEM : S3; // any mem 4730 %} 4731 4732 // UnConditional branch 4733 pipe_class pipe_jmp(label labl) 4734 %{ 4735 single_instruction; 4736 BR : S3; 4737 %} 4738 4739 // Conditional branch 4740 pipe_class pipe_jcc(cmpOp cmp, rFlagsReg cr, label labl) 4741 %{ 4742 single_instruction; 4743 cr : S1(read); 4744 BR : S3; 4745 %} 4746 4747 // Allocation idiom 4748 pipe_class pipe_cmpxchg(rRegP dst, rRegP heap_ptr) 4749 %{ 4750 instruction_count(1); force_serialization; 4751 fixed_latency(6); 4752 heap_ptr : S3(read); 4753 DECODE : S0(3); 4754 D0 : S2; 4755 MEM : S3; 4756 ALU : S3(2); 4757 dst : S5(write); 4758 BR : S5; 4759 %} 4760 4761 // Generic big/slow expanded idiom 4762 pipe_class pipe_slow() 4763 %{ 4764 instruction_count(10); multiple_bundles; force_serialization; 4765 fixed_latency(100); 4766 D0 : S0(2); 4767 MEM : S3(2); 4768 %} 4769 4770 // The real do-nothing guy 4771 pipe_class empty() 4772 %{ 4773 instruction_count(0); 4774 %} 4775 4776 // Define the class for the Nop node 4777 define 4778 %{ 4779 MachNop = empty; 4780 %} 4781 4782 %} 4783 4784 //----------INSTRUCTIONS------------------------------------------------------- 4785 // 4786 // match -- States which machine-independent subtree may be replaced 4787 // by this instruction. 
4788 // ins_cost -- The estimated cost of this instruction is used by instruction 4789 // selection to identify a minimum cost tree of machine 4790 // instructions that matches a tree of machine-independent 4791 // instructions. 4792 // format -- A string providing the disassembly for this instruction. 4793 // The value of an instruction's operand may be inserted 4794 // by referring to it with a '$' prefix. 4795 // opcode -- Three instruction opcodes may be provided. These are referred 4796 // to within an encode class as $primary, $secondary, and $tertiary 4797 // rrspectively. The primary opcode is commonly used to 4798 // indicate the type of machine instruction, while secondary 4799 // and tertiary are often used for prefix options or addressing 4800 // modes. 4801 // ins_encode -- A list of encode classes with parameters. The encode class 4802 // name must have been defined in an 'enc_class' specification 4803 // in the encode section of the architecture description. 4804 4805 //----------Load/Store/Move Instructions--------------------------------------- 4806 //----------Load Instructions-------------------------------------------------- 4807 4808 // Load Byte (8 bit signed) 4809 instruct loadB(rRegI dst, memory mem) 4810 %{ 4811 match(Set dst (LoadB mem)); 4812 4813 ins_cost(125); 4814 format %{ "movsbl $dst, $mem\t# byte" %} 4815 4816 ins_encode %{ 4817 __ movsbl($dst$$Register, $mem$$Address); 4818 %} 4819 4820 ins_pipe(ialu_reg_mem); 4821 %} 4822 4823 // Load Byte (8 bit signed) into Long Register 4824 instruct loadB2L(rRegL dst, memory mem) 4825 %{ 4826 match(Set dst (ConvI2L (LoadB mem))); 4827 4828 ins_cost(125); 4829 format %{ "movsbq $dst, $mem\t# byte -> long" %} 4830 4831 ins_encode %{ 4832 __ movsbq($dst$$Register, $mem$$Address); 4833 %} 4834 4835 ins_pipe(ialu_reg_mem); 4836 %} 4837 4838 // Load Unsigned Byte (8 bit UNsigned) 4839 instruct loadUB(rRegI dst, memory mem) 4840 %{ 4841 match(Set dst (LoadUB mem)); 4842 4843 ins_cost(125); 4844 format %{ "movzbl $dst, $mem\t# ubyte" %} 4845 4846 ins_encode %{ 4847 __ movzbl($dst$$Register, $mem$$Address); 4848 %} 4849 4850 ins_pipe(ialu_reg_mem); 4851 %} 4852 4853 // Load Unsigned Byte (8 bit UNsigned) into Long Register 4854 instruct loadUB2L(rRegL dst, memory mem) 4855 %{ 4856 match(Set dst (ConvI2L (LoadUB mem))); 4857 4858 ins_cost(125); 4859 format %{ "movzbq $dst, $mem\t# ubyte -> long" %} 4860 4861 ins_encode %{ 4862 __ movzbq($dst$$Register, $mem$$Address); 4863 %} 4864 4865 ins_pipe(ialu_reg_mem); 4866 %} 4867 4868 // Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register 4869 instruct loadUB2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{ 4870 match(Set dst (ConvI2L (AndI (LoadUB mem) mask))); 4871 effect(KILL cr); 4872 4873 format %{ "movzbq $dst, $mem\t# ubyte & 32-bit mask -> long\n\t" 4874 "andl $dst, right_n_bits($mask, 8)" %} 4875 ins_encode %{ 4876 Register Rdst = $dst$$Register; 4877 __ movzbq(Rdst, $mem$$Address); 4878 __ andl(Rdst, $mask$$constant & right_n_bits(8)); 4879 %} 4880 ins_pipe(ialu_reg_mem); 4881 %} 4882 4883 // Load Short (16 bit signed) 4884 instruct loadS(rRegI dst, memory mem) 4885 %{ 4886 match(Set dst (LoadS mem)); 4887 4888 ins_cost(125); 4889 format %{ "movswl $dst, $mem\t# short" %} 4890 4891 ins_encode %{ 4892 __ movswl($dst$$Register, $mem$$Address); 4893 %} 4894 4895 ins_pipe(ialu_reg_mem); 4896 %} 4897 4898 // Load Short (16 bit signed) to Byte (8 bit signed) 4899 instruct loadS2B(rRegI dst, memory mem, immI_24 twentyfour) %{ 4900 match(Set dst (RShiftI 
(LShiftI (LoadS mem) twentyfour) twentyfour)); 4901 4902 ins_cost(125); 4903 format %{ "movsbl $dst, $mem\t# short -> byte" %} 4904 ins_encode %{ 4905 __ movsbl($dst$$Register, $mem$$Address); 4906 %} 4907 ins_pipe(ialu_reg_mem); 4908 %} 4909 4910 // Load Short (16 bit signed) into Long Register 4911 instruct loadS2L(rRegL dst, memory mem) 4912 %{ 4913 match(Set dst (ConvI2L (LoadS mem))); 4914 4915 ins_cost(125); 4916 format %{ "movswq $dst, $mem\t# short -> long" %} 4917 4918 ins_encode %{ 4919 __ movswq($dst$$Register, $mem$$Address); 4920 %} 4921 4922 ins_pipe(ialu_reg_mem); 4923 %} 4924 4925 // Load Unsigned Short/Char (16 bit UNsigned) 4926 instruct loadUS(rRegI dst, memory mem) 4927 %{ 4928 match(Set dst (LoadUS mem)); 4929 4930 ins_cost(125); 4931 format %{ "movzwl $dst, $mem\t# ushort/char" %} 4932 4933 ins_encode %{ 4934 __ movzwl($dst$$Register, $mem$$Address); 4935 %} 4936 4937 ins_pipe(ialu_reg_mem); 4938 %} 4939 4940 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed) 4941 instruct loadUS2B(rRegI dst, memory mem, immI_24 twentyfour) %{ 4942 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour)); 4943 4944 ins_cost(125); 4945 format %{ "movsbl $dst, $mem\t# ushort -> byte" %} 4946 ins_encode %{ 4947 __ movsbl($dst$$Register, $mem$$Address); 4948 %} 4949 ins_pipe(ialu_reg_mem); 4950 %} 4951 4952 // Load Unsigned Short/Char (16 bit UNsigned) into Long Register 4953 instruct loadUS2L(rRegL dst, memory mem) 4954 %{ 4955 match(Set dst (ConvI2L (LoadUS mem))); 4956 4957 ins_cost(125); 4958 format %{ "movzwq $dst, $mem\t# ushort/char -> long" %} 4959 4960 ins_encode %{ 4961 __ movzwq($dst$$Register, $mem$$Address); 4962 %} 4963 4964 ins_pipe(ialu_reg_mem); 4965 %} 4966 4967 // Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register 4968 instruct loadUS2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{ 4969 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 4970 4971 format %{ "movzbq $dst, $mem\t# ushort/char & 0xFF -> long" %} 4972 ins_encode %{ 4973 __ movzbq($dst$$Register, $mem$$Address); 4974 %} 4975 ins_pipe(ialu_reg_mem); 4976 %} 4977 4978 // Load Unsigned Short/Char (16 bit UNsigned) with 32-bit mask into Long Register 4979 instruct loadUS2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{ 4980 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 4981 effect(KILL cr); 4982 4983 format %{ "movzwq $dst, $mem\t# ushort/char & 32-bit mask -> long\n\t" 4984 "andl $dst, right_n_bits($mask, 16)" %} 4985 ins_encode %{ 4986 Register Rdst = $dst$$Register; 4987 __ movzwq(Rdst, $mem$$Address); 4988 __ andl(Rdst, $mask$$constant & right_n_bits(16)); 4989 %} 4990 ins_pipe(ialu_reg_mem); 4991 %} 4992 4993 // Load Integer 4994 instruct loadI(rRegI dst, memory mem) 4995 %{ 4996 match(Set dst (LoadI mem)); 4997 4998 ins_cost(125); 4999 format %{ "movl $dst, $mem\t# int" %} 5000 5001 ins_encode %{ 5002 __ movl($dst$$Register, $mem$$Address); 5003 %} 5004 5005 ins_pipe(ialu_reg_mem); 5006 %} 5007 5008 // Load Integer (32 bit signed) to Byte (8 bit signed) 5009 instruct loadI2B(rRegI dst, memory mem, immI_24 twentyfour) %{ 5010 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); 5011 5012 ins_cost(125); 5013 format %{ "movsbl $dst, $mem\t# int -> byte" %} 5014 ins_encode %{ 5015 __ movsbl($dst$$Register, $mem$$Address); 5016 %} 5017 ins_pipe(ialu_reg_mem); 5018 %} 5019 5020 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned) 5021 instruct loadI2UB(rRegI dst, memory mem, immI_255 mask) %{ 5022 match(Set 
dst (AndI (LoadI mem) mask)); 5023 5024 ins_cost(125); 5025 format %{ "movzbl $dst, $mem\t# int -> ubyte" %} 5026 ins_encode %{ 5027 __ movzbl($dst$$Register, $mem$$Address); 5028 %} 5029 ins_pipe(ialu_reg_mem); 5030 %} 5031 5032 // Load Integer (32 bit signed) to Short (16 bit signed) 5033 instruct loadI2S(rRegI dst, memory mem, immI_16 sixteen) %{ 5034 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); 5035 5036 ins_cost(125); 5037 format %{ "movswl $dst, $mem\t# int -> short" %} 5038 ins_encode %{ 5039 __ movswl($dst$$Register, $mem$$Address); 5040 %} 5041 ins_pipe(ialu_reg_mem); 5042 %} 5043 5044 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned) 5045 instruct loadI2US(rRegI dst, memory mem, immI_65535 mask) %{ 5046 match(Set dst (AndI (LoadI mem) mask)); 5047 5048 ins_cost(125); 5049 format %{ "movzwl $dst, $mem\t# int -> ushort/char" %} 5050 ins_encode %{ 5051 __ movzwl($dst$$Register, $mem$$Address); 5052 %} 5053 ins_pipe(ialu_reg_mem); 5054 %} 5055 5056 // Load Integer into Long Register 5057 instruct loadI2L(rRegL dst, memory mem) 5058 %{ 5059 match(Set dst (ConvI2L (LoadI mem))); 5060 5061 ins_cost(125); 5062 format %{ "movslq $dst, $mem\t# int -> long" %} 5063 5064 ins_encode %{ 5065 __ movslq($dst$$Register, $mem$$Address); 5066 %} 5067 5068 ins_pipe(ialu_reg_mem); 5069 %} 5070 5071 // Load Integer with mask 0xFF into Long Register 5072 instruct loadI2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{ 5073 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5074 5075 format %{ "movzbq $dst, $mem\t# int & 0xFF -> long" %} 5076 ins_encode %{ 5077 __ movzbq($dst$$Register, $mem$$Address); 5078 %} 5079 ins_pipe(ialu_reg_mem); 5080 %} 5081 5082 // Load Integer with mask 0xFFFF into Long Register 5083 instruct loadI2L_immI_65535(rRegL dst, memory mem, immI_65535 mask) %{ 5084 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5085 5086 format %{ "movzwq $dst, $mem\t# int & 0xFFFF -> long" %} 5087 ins_encode %{ 5088 __ movzwq($dst$$Register, $mem$$Address); 5089 %} 5090 ins_pipe(ialu_reg_mem); 5091 %} 5092 5093 // Load Integer with a 31-bit mask into Long Register 5094 instruct loadI2L_immU31(rRegL dst, memory mem, immU31 mask, rFlagsReg cr) %{ 5095 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5096 effect(KILL cr); 5097 5098 format %{ "movl $dst, $mem\t# int & 31-bit mask -> long\n\t" 5099 "andl $dst, $mask" %} 5100 ins_encode %{ 5101 Register Rdst = $dst$$Register; 5102 __ movl(Rdst, $mem$$Address); 5103 __ andl(Rdst, $mask$$constant); 5104 %} 5105 ins_pipe(ialu_reg_mem); 5106 %} 5107 5108 // Load Unsigned Integer into Long Register 5109 instruct loadUI2L(rRegL dst, memory mem, immL_32bits mask) 5110 %{ 5111 match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); 5112 5113 ins_cost(125); 5114 format %{ "movl $dst, $mem\t# uint -> long" %} 5115 5116 ins_encode %{ 5117 __ movl($dst$$Register, $mem$$Address); 5118 %} 5119 5120 ins_pipe(ialu_reg_mem); 5121 %} 5122 5123 // Load Long 5124 instruct loadL(rRegL dst, memory mem) 5125 %{ 5126 match(Set dst (LoadL mem)); 5127 5128 ins_cost(125); 5129 format %{ "movq $dst, $mem\t# long" %} 5130 5131 ins_encode %{ 5132 __ movq($dst$$Register, $mem$$Address); 5133 %} 5134 5135 ins_pipe(ialu_reg_mem); // XXX 5136 %} 5137 5138 // Load Range 5139 instruct loadRange(rRegI dst, memory mem) 5140 %{ 5141 match(Set dst (LoadRange mem)); 5142 5143 ins_cost(125); // XXX 5144 format %{ "movl $dst, $mem\t# range" %} 5145 ins_encode %{ 5146 __ movl($dst$$Register, $mem$$Address); 5147 %} 5148 ins_pipe(ialu_reg_mem); 5149 %} 5150 
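// Note on the masked widening loads above (loadI2L_immI_255,
// loadI2L_immI_65535, loadUS2L_immI_255 and friends): they rely on the
// identity that masking with 0xFF or 0xFFFF before widening to long is the
// same as a zero-extending narrow load from the same address on a
// little-endian target such as x86.  A hedged sketch in plain C++
// (illustrative only; names are not part of this file):
//
//   #include <cstdint>
//
//   bool mask_equals_narrow_load(const int32_t* p) {
//     // (ConvI2L (AndI (LoadI p) 0xFF)) computed literally ...
//     uint64_t via_mask        = (uint64_t)(uint32_t)(*p & 0xFF);
//     // ... equals what a single movzbq from the same address produces.
//     uint64_t via_narrow_load = *(const uint8_t*)p;
//     return via_mask == via_narrow_load;   // always true on little-endian
//   }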
5151 // Load Pointer 5152 instruct loadP(rRegP dst, memory mem) 5153 %{ 5154 match(Set dst (LoadP mem)); 5155 predicate(n->as_Load()->barrier_data() == 0); 5156 5157 ins_cost(125); // XXX 5158 format %{ "movq $dst, $mem\t# ptr" %} 5159 ins_encode %{ 5160 __ movq($dst$$Register, $mem$$Address); 5161 %} 5162 ins_pipe(ialu_reg_mem); // XXX 5163 %} 5164 5165 // Load Compressed Pointer 5166 instruct loadN(rRegN dst, memory mem) 5167 %{ 5168 match(Set dst (LoadN mem)); 5169 5170 ins_cost(125); // XXX 5171 format %{ "movl $dst, $mem\t# compressed ptr" %} 5172 ins_encode %{ 5173 __ movl($dst$$Register, $mem$$Address); 5174 %} 5175 ins_pipe(ialu_reg_mem); // XXX 5176 %} 5177 5178 5179 // Load Klass Pointer 5180 instruct loadKlass(rRegP dst, memory mem) 5181 %{ 5182 match(Set dst (LoadKlass mem)); 5183 5184 ins_cost(125); // XXX 5185 format %{ "movq $dst, $mem\t# class" %} 5186 ins_encode %{ 5187 __ movq($dst$$Register, $mem$$Address); 5188 %} 5189 ins_pipe(ialu_reg_mem); // XXX 5190 %} 5191 5192 // Load narrow Klass Pointer 5193 instruct loadNKlass(rRegN dst, memory mem) 5194 %{ 5195 predicate(!UseCompactObjectHeaders); 5196 match(Set dst (LoadNKlass mem)); 5197 5198 ins_cost(125); // XXX 5199 format %{ "movl $dst, $mem\t# compressed klass ptr" %} 5200 ins_encode %{ 5201 __ movl($dst$$Register, $mem$$Address); 5202 %} 5203 ins_pipe(ialu_reg_mem); // XXX 5204 %} 5205 5206 instruct loadNKlassLilliput(rRegN dst, indOffset8 mem, rFlagsReg cr) 5207 %{ 5208 predicate(UseCompactObjectHeaders); 5209 match(Set dst (LoadNKlass mem)); 5210 effect(KILL cr); 5211 ins_cost(125); // XXX 5212 format %{ "movl $dst, $mem\t# compressed klass ptr" %} 5213 ins_encode %{ 5214 assert($mem$$disp == oopDesc::klass_offset_in_bytes(), "expect correct offset 4, but got: %d", $mem$$disp); 5215 assert($mem$$index == 4, "expect no index register: %d", $mem$$index); 5216 Register dst = $dst$$Register; 5217 Register obj = $mem$$base$$Register; 5218 C2LoadNKlassStub* stub = new (Compile::current()->comp_arena()) C2LoadNKlassStub(dst); 5219 Compile::current()->output()->add_stub(stub); 5220 __ movq(dst, Address(obj, oopDesc::mark_offset_in_bytes())); 5221 __ testb(dst, markWord::monitor_value); 5222 __ jcc(Assembler::notZero, stub->entry()); 5223 __ bind(stub->continuation()); 5224 __ shrq(dst, markWord::klass_shift); 5225 %} 5226 ins_pipe(pipe_slow); // XXX 5227 %} 5228 5229 // Load Float 5230 instruct loadF(regF dst, memory mem) 5231 %{ 5232 match(Set dst (LoadF mem)); 5233 5234 ins_cost(145); // XXX 5235 format %{ "movss $dst, $mem\t# float" %} 5236 ins_encode %{ 5237 __ movflt($dst$$XMMRegister, $mem$$Address); 5238 %} 5239 ins_pipe(pipe_slow); // XXX 5240 %} 5241 5242 // Load Float 5243 instruct MoveF2VL(vlRegF dst, regF src) %{ 5244 match(Set dst src); 5245 format %{ "movss $dst,$src\t! load float (4 bytes)" %} 5246 ins_encode %{ 5247 __ movflt($dst$$XMMRegister, $src$$XMMRegister); 5248 %} 5249 ins_pipe( fpu_reg_reg ); 5250 %} 5251 5252 // Load Float 5253 instruct MoveF2LEG(legRegF dst, regF src) %{ 5254 match(Set dst src); 5255 format %{ "movss $dst,$src\t# if src != dst load float (4 bytes)" %} 5256 ins_encode %{ 5257 __ movflt($dst$$XMMRegister, $src$$XMMRegister); 5258 %} 5259 ins_pipe( fpu_reg_reg ); 5260 %} 5261 5262 // Load Float 5263 instruct MoveVL2F(regF dst, vlRegF src) %{ 5264 match(Set dst src); 5265 format %{ "movss $dst,$src\t! 
load float (4 bytes)" %} 5266 ins_encode %{ 5267 __ movflt($dst$$XMMRegister, $src$$XMMRegister); 5268 %} 5269 ins_pipe( fpu_reg_reg ); 5270 %} 5271 5272 // Load Float 5273 instruct MoveLEG2F(regF dst, legRegF src) %{ 5274 match(Set dst src); 5275 format %{ "movss $dst,$src\t# if src != dst load float (4 bytes)" %} 5276 ins_encode %{ 5277 __ movflt($dst$$XMMRegister, $src$$XMMRegister); 5278 %} 5279 ins_pipe( fpu_reg_reg ); 5280 %} 5281 5282 // Load Double 5283 instruct loadD_partial(regD dst, memory mem) 5284 %{ 5285 predicate(!UseXmmLoadAndClearUpper); 5286 match(Set dst (LoadD mem)); 5287 5288 ins_cost(145); // XXX 5289 format %{ "movlpd $dst, $mem\t# double" %} 5290 ins_encode %{ 5291 __ movdbl($dst$$XMMRegister, $mem$$Address); 5292 %} 5293 ins_pipe(pipe_slow); // XXX 5294 %} 5295 5296 instruct loadD(regD dst, memory mem) 5297 %{ 5298 predicate(UseXmmLoadAndClearUpper); 5299 match(Set dst (LoadD mem)); 5300 5301 ins_cost(145); // XXX 5302 format %{ "movsd $dst, $mem\t# double" %} 5303 ins_encode %{ 5304 __ movdbl($dst$$XMMRegister, $mem$$Address); 5305 %} 5306 ins_pipe(pipe_slow); // XXX 5307 %} 5308 5309 // Load Double 5310 instruct MoveD2VL(vlRegD dst, regD src) %{ 5311 match(Set dst src); 5312 format %{ "movsd $dst,$src\t! load double (8 bytes)" %} 5313 ins_encode %{ 5314 __ movdbl($dst$$XMMRegister, $src$$XMMRegister); 5315 %} 5316 ins_pipe( fpu_reg_reg ); 5317 %} 5318 5319 // Load Double 5320 instruct MoveD2LEG(legRegD dst, regD src) %{ 5321 match(Set dst src); 5322 format %{ "movsd $dst,$src\t# if src != dst load double (8 bytes)" %} 5323 ins_encode %{ 5324 __ movdbl($dst$$XMMRegister, $src$$XMMRegister); 5325 %} 5326 ins_pipe( fpu_reg_reg ); 5327 %} 5328 5329 // Load Double 5330 instruct MoveVL2D(regD dst, vlRegD src) %{ 5331 match(Set dst src); 5332 format %{ "movsd $dst,$src\t! load double (8 bytes)" %} 5333 ins_encode %{ 5334 __ movdbl($dst$$XMMRegister, $src$$XMMRegister); 5335 %} 5336 ins_pipe( fpu_reg_reg ); 5337 %} 5338 5339 // Load Double 5340 instruct MoveLEG2D(regD dst, legRegD src) %{ 5341 match(Set dst src); 5342 format %{ "movsd $dst,$src\t# if src != dst load double (8 bytes)" %} 5343 ins_encode %{ 5344 __ movdbl($dst$$XMMRegister, $src$$XMMRegister); 5345 %} 5346 ins_pipe( fpu_reg_reg ); 5347 %} 5348 5349 // Following pseudo code describes the algorithm for max[FD]: 5350 // Min algorithm is on similar lines 5351 // btmp = (b < +0.0) ? a : b 5352 // atmp = (b < +0.0) ? b : a 5353 // Tmp = Max_Float(atmp , btmp) 5354 // Res = (atmp == NaN) ? 
atmp : Tmp 5355 5356 // max = java.lang.Math.max(float a, float b) 5357 instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ 5358 predicate(UseAVX > 0 && !n->is_reduction()); 5359 match(Set dst (MaxF a b)); 5360 effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); 5361 format %{ 5362 "vblendvps $btmp,$b,$a,$b \n\t" 5363 "vblendvps $atmp,$a,$b,$b \n\t" 5364 "vmaxss $tmp,$atmp,$btmp \n\t" 5365 "vcmpps.unordered $btmp,$atmp,$atmp \n\t" 5366 "vblendvps $dst,$tmp,$atmp,$btmp \n\t" 5367 %} 5368 ins_encode %{ 5369 int vector_len = Assembler::AVX_128bit; 5370 __ vblendvps($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, vector_len); 5371 __ vblendvps($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $b$$XMMRegister, vector_len); 5372 __ vmaxss($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister); 5373 __ vcmpps($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len); 5374 __ vblendvps($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len); 5375 %} 5376 ins_pipe( pipe_slow ); 5377 %} 5378 5379 instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{ 5380 predicate(UseAVX > 0 && n->is_reduction()); 5381 match(Set dst (MaxF a b)); 5382 effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); 5383 5384 format %{ "$dst = max($a, $b)\t# intrinsic (float)" %} 5385 ins_encode %{ 5386 emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register, 5387 false /*min*/, true /*single*/); 5388 %} 5389 ins_pipe( pipe_slow ); 5390 %} 5391 5392 // max = java.lang.Math.max(double a, double b) 5393 instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{ 5394 predicate(UseAVX > 0 && !n->is_reduction()); 5395 match(Set dst (MaxD a b)); 5396 effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp); 5397 format %{ 5398 "vblendvpd $btmp,$b,$a,$b \n\t" 5399 "vblendvpd $atmp,$a,$b,$b \n\t" 5400 "vmaxsd $tmp,$atmp,$btmp \n\t" 5401 "vcmppd.unordered $btmp,$atmp,$atmp \n\t" 5402 "vblendvpd $dst,$tmp,$atmp,$btmp \n\t" 5403 %} 5404 ins_encode %{ 5405 int vector_len = Assembler::AVX_128bit; 5406 __ vblendvpd($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, vector_len); 5407 __ vblendvpd($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $b$$XMMRegister, vector_len); 5408 __ vmaxsd($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister); 5409 __ vcmppd($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len); 5410 __ vblendvpd($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len); 5411 %} 5412 ins_pipe( pipe_slow ); 5413 %} 5414 5415 instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{ 5416 predicate(UseAVX > 0 && n->is_reduction()); 5417 match(Set dst (MaxD a b)); 5418 effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); 5419 5420 format %{ "$dst = max($a, $b)\t# intrinsic (double)" %} 5421 ins_encode %{ 5422 emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register, 5423 false /*min*/, false /*single*/); 5424 %} 5425 ins_pipe( pipe_slow ); 5426 %} 5427 5428 // min = java.lang.Math.min(float a, float b) 5429 instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ 5430 predicate(UseAVX > 0 && !n->is_reduction()); 5431 
match(Set dst (MinF a b)); 5432 effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); 5433 format %{ 5434 "vblendvps $atmp,$a,$b,$a \n\t" 5435 "vblendvps $btmp,$b,$a,$a \n\t" 5436 "vminss $tmp,$atmp,$btmp \n\t" 5437 "vcmpps.unordered $btmp,$atmp,$atmp \n\t" 5438 "vblendvps $dst,$tmp,$atmp,$btmp \n\t" 5439 %} 5440 ins_encode %{ 5441 int vector_len = Assembler::AVX_128bit; 5442 __ vblendvps($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, vector_len); 5443 __ vblendvps($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $a$$XMMRegister, vector_len); 5444 __ vminss($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister); 5445 __ vcmpps($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len); 5446 __ vblendvps($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len); 5447 %} 5448 ins_pipe( pipe_slow ); 5449 %} 5450 5451 instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{ 5452 predicate(UseAVX > 0 && n->is_reduction()); 5453 match(Set dst (MinF a b)); 5454 effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); 5455 5456 format %{ "$dst = min($a, $b)\t# intrinsic (float)" %} 5457 ins_encode %{ 5458 emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register, 5459 true /*min*/, true /*single*/); 5460 %} 5461 ins_pipe( pipe_slow ); 5462 %} 5463 5464 // min = java.lang.Math.min(double a, double b) 5465 instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{ 5466 predicate(UseAVX > 0 && !n->is_reduction()); 5467 match(Set dst (MinD a b)); 5468 effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); 5469 format %{ 5470 "vblendvpd $atmp,$a,$b,$a \n\t" 5471 "vblendvpd $btmp,$b,$a,$a \n\t" 5472 "vminsd $tmp,$atmp,$btmp \n\t" 5473 "vcmppd.unordered $btmp,$atmp,$atmp \n\t" 5474 "vblendvpd $dst,$tmp,$atmp,$btmp \n\t" 5475 %} 5476 ins_encode %{ 5477 int vector_len = Assembler::AVX_128bit; 5478 __ vblendvpd($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, vector_len); 5479 __ vblendvpd($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $a$$XMMRegister, vector_len); 5480 __ vminsd($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister); 5481 __ vcmppd($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len); 5482 __ vblendvpd($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len); 5483 %} 5484 ins_pipe( pipe_slow ); 5485 %} 5486 5487 instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{ 5488 predicate(UseAVX > 0 && n->is_reduction()); 5489 match(Set dst (MinD a b)); 5490 effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); 5491 5492 format %{ "$dst = min($a, $b)\t# intrinsic (double)" %} 5493 ins_encode %{ 5494 emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register, 5495 true /*min*/, false /*single*/); 5496 %} 5497 ins_pipe( pipe_slow ); 5498 %} 5499 5500 // Load Effective Address 5501 instruct leaP8(rRegP dst, indOffset8 mem) 5502 %{ 5503 match(Set dst mem); 5504 5505 ins_cost(110); // XXX 5506 format %{ "leaq $dst, $mem\t# ptr 8" %} 5507 ins_encode %{ 5508 __ leaq($dst$$Register, $mem$$Address); 5509 %} 5510 ins_pipe(ialu_reg_reg_fat); 5511 %} 5512 5513 instruct leaP32(rRegP dst, indOffset32 mem) 5514 %{ 5515 match(Set dst mem); 5516 5517 ins_cost(110); 5518 format %{ "leaq 
$dst, $mem\t# ptr 32" %} 5519 ins_encode %{ 5520 __ leaq($dst$$Register, $mem$$Address); 5521 %} 5522 ins_pipe(ialu_reg_reg_fat); 5523 %} 5524 5525 instruct leaPIdxOff(rRegP dst, indIndexOffset mem) 5526 %{ 5527 match(Set dst mem); 5528 5529 ins_cost(110); 5530 format %{ "leaq $dst, $mem\t# ptr idxoff" %} 5531 ins_encode %{ 5532 __ leaq($dst$$Register, $mem$$Address); 5533 %} 5534 ins_pipe(ialu_reg_reg_fat); 5535 %} 5536 5537 instruct leaPIdxScale(rRegP dst, indIndexScale mem) 5538 %{ 5539 match(Set dst mem); 5540 5541 ins_cost(110); 5542 format %{ "leaq $dst, $mem\t# ptr idxscale" %} 5543 ins_encode %{ 5544 __ leaq($dst$$Register, $mem$$Address); 5545 %} 5546 ins_pipe(ialu_reg_reg_fat); 5547 %} 5548 5549 instruct leaPPosIdxScale(rRegP dst, indPosIndexScale mem) 5550 %{ 5551 match(Set dst mem); 5552 5553 ins_cost(110); 5554 format %{ "leaq $dst, $mem\t# ptr idxscale" %} 5555 ins_encode %{ 5556 __ leaq($dst$$Register, $mem$$Address); 5557 %} 5558 ins_pipe(ialu_reg_reg_fat); 5559 %} 5560 5561 instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem) 5562 %{ 5563 match(Set dst mem); 5564 5565 ins_cost(110); 5566 format %{ "leaq $dst, $mem\t# ptr idxscaleoff" %} 5567 ins_encode %{ 5568 __ leaq($dst$$Register, $mem$$Address); 5569 %} 5570 ins_pipe(ialu_reg_reg_fat); 5571 %} 5572 5573 instruct leaPPosIdxOff(rRegP dst, indPosIndexOffset mem) 5574 %{ 5575 match(Set dst mem); 5576 5577 ins_cost(110); 5578 format %{ "leaq $dst, $mem\t# ptr posidxoff" %} 5579 ins_encode %{ 5580 __ leaq($dst$$Register, $mem$$Address); 5581 %} 5582 ins_pipe(ialu_reg_reg_fat); 5583 %} 5584 5585 instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem) 5586 %{ 5587 match(Set dst mem); 5588 5589 ins_cost(110); 5590 format %{ "leaq $dst, $mem\t# ptr posidxscaleoff" %} 5591 ins_encode %{ 5592 __ leaq($dst$$Register, $mem$$Address); 5593 %} 5594 ins_pipe(ialu_reg_reg_fat); 5595 %} 5596 5597 // Load Effective Address which uses Narrow (32-bits) oop 5598 instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem) 5599 %{ 5600 predicate(UseCompressedOops && (CompressedOops::shift() != 0)); 5601 match(Set dst mem); 5602 5603 ins_cost(110); 5604 format %{ "leaq $dst, $mem\t# ptr compressedoopoff32" %} 5605 ins_encode %{ 5606 __ leaq($dst$$Register, $mem$$Address); 5607 %} 5608 ins_pipe(ialu_reg_reg_fat); 5609 %} 5610 5611 instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem) 5612 %{ 5613 predicate(CompressedOops::shift() == 0); 5614 match(Set dst mem); 5615 5616 ins_cost(110); // XXX 5617 format %{ "leaq $dst, $mem\t# ptr off8narrow" %} 5618 ins_encode %{ 5619 __ leaq($dst$$Register, $mem$$Address); 5620 %} 5621 ins_pipe(ialu_reg_reg_fat); 5622 %} 5623 5624 instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem) 5625 %{ 5626 predicate(CompressedOops::shift() == 0); 5627 match(Set dst mem); 5628 5629 ins_cost(110); 5630 format %{ "leaq $dst, $mem\t# ptr off32narrow" %} 5631 ins_encode %{ 5632 __ leaq($dst$$Register, $mem$$Address); 5633 %} 5634 ins_pipe(ialu_reg_reg_fat); 5635 %} 5636 5637 instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem) 5638 %{ 5639 predicate(CompressedOops::shift() == 0); 5640 match(Set dst mem); 5641 5642 ins_cost(110); 5643 format %{ "leaq $dst, $mem\t# ptr idxoffnarrow" %} 5644 ins_encode %{ 5645 __ leaq($dst$$Register, $mem$$Address); 5646 %} 5647 ins_pipe(ialu_reg_reg_fat); 5648 %} 5649 5650 instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem) 5651 %{ 5652 predicate(CompressedOops::shift() == 0); 5653 match(Set dst mem); 5654 5655 ins_cost(110); 5656 format 
%{ "leaq $dst, $mem\t# ptr idxscalenarrow" %} 5657 ins_encode %{ 5658 __ leaq($dst$$Register, $mem$$Address); 5659 %} 5660 ins_pipe(ialu_reg_reg_fat); 5661 %} 5662 5663 instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem) 5664 %{ 5665 predicate(CompressedOops::shift() == 0); 5666 match(Set dst mem); 5667 5668 ins_cost(110); 5669 format %{ "leaq $dst, $mem\t# ptr idxscaleoffnarrow" %} 5670 ins_encode %{ 5671 __ leaq($dst$$Register, $mem$$Address); 5672 %} 5673 ins_pipe(ialu_reg_reg_fat); 5674 %} 5675 5676 instruct leaPPosIdxOffNarrow(rRegP dst, indPosIndexOffsetNarrow mem) 5677 %{ 5678 predicate(CompressedOops::shift() == 0); 5679 match(Set dst mem); 5680 5681 ins_cost(110); 5682 format %{ "leaq $dst, $mem\t# ptr posidxoffnarrow" %} 5683 ins_encode %{ 5684 __ leaq($dst$$Register, $mem$$Address); 5685 %} 5686 ins_pipe(ialu_reg_reg_fat); 5687 %} 5688 5689 instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem) 5690 %{ 5691 predicate(CompressedOops::shift() == 0); 5692 match(Set dst mem); 5693 5694 ins_cost(110); 5695 format %{ "leaq $dst, $mem\t# ptr posidxscaleoffnarrow" %} 5696 ins_encode %{ 5697 __ leaq($dst$$Register, $mem$$Address); 5698 %} 5699 ins_pipe(ialu_reg_reg_fat); 5700 %} 5701 5702 instruct loadConI(rRegI dst, immI src) 5703 %{ 5704 match(Set dst src); 5705 5706 format %{ "movl $dst, $src\t# int" %} 5707 ins_encode %{ 5708 __ movl($dst$$Register, $src$$constant); 5709 %} 5710 ins_pipe(ialu_reg_fat); // XXX 5711 %} 5712 5713 instruct loadConI0(rRegI dst, immI_0 src, rFlagsReg cr) 5714 %{ 5715 match(Set dst src); 5716 effect(KILL cr); 5717 5718 ins_cost(50); 5719 format %{ "xorl $dst, $dst\t# int" %} 5720 ins_encode %{ 5721 __ xorl($dst$$Register, $dst$$Register); 5722 %} 5723 ins_pipe(ialu_reg); 5724 %} 5725 5726 instruct loadConL(rRegL dst, immL src) 5727 %{ 5728 match(Set dst src); 5729 5730 ins_cost(150); 5731 format %{ "movq $dst, $src\t# long" %} 5732 ins_encode %{ 5733 __ mov64($dst$$Register, $src$$constant); 5734 %} 5735 ins_pipe(ialu_reg); 5736 %} 5737 5738 instruct loadConL0(rRegL dst, immL0 src, rFlagsReg cr) 5739 %{ 5740 match(Set dst src); 5741 effect(KILL cr); 5742 5743 ins_cost(50); 5744 format %{ "xorl $dst, $dst\t# long" %} 5745 ins_encode %{ 5746 __ xorl($dst$$Register, $dst$$Register); 5747 %} 5748 ins_pipe(ialu_reg); // XXX 5749 %} 5750 5751 instruct loadConUL32(rRegL dst, immUL32 src) 5752 %{ 5753 match(Set dst src); 5754 5755 ins_cost(60); 5756 format %{ "movl $dst, $src\t# long (unsigned 32-bit)" %} 5757 ins_encode %{ 5758 __ movl($dst$$Register, $src$$constant); 5759 %} 5760 ins_pipe(ialu_reg); 5761 %} 5762 5763 instruct loadConL32(rRegL dst, immL32 src) 5764 %{ 5765 match(Set dst src); 5766 5767 ins_cost(70); 5768 format %{ "movq $dst, $src\t# long (32-bit)" %} 5769 ins_encode %{ 5770 __ movq($dst$$Register, $src$$constant); 5771 %} 5772 ins_pipe(ialu_reg); 5773 %} 5774 5775 instruct loadConP(rRegP dst, immP con) %{ 5776 match(Set dst con); 5777 5778 format %{ "movq $dst, $con\t# ptr" %} 5779 ins_encode %{ 5780 __ mov64($dst$$Register, $con$$constant, $con->constant_reloc(), RELOC_IMM64); 5781 %} 5782 ins_pipe(ialu_reg_fat); // XXX 5783 %} 5784 5785 instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr) 5786 %{ 5787 match(Set dst src); 5788 effect(KILL cr); 5789 5790 ins_cost(50); 5791 format %{ "xorl $dst, $dst\t# ptr" %} 5792 ins_encode %{ 5793 __ xorl($dst$$Register, $dst$$Register); 5794 %} 5795 ins_pipe(ialu_reg); 5796 %} 5797 5798 instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr) 5799 %{ 5800 
match(Set dst src); 5801 effect(KILL cr); 5802 5803 ins_cost(60); 5804 format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %} 5805 ins_encode %{ 5806 __ movl($dst$$Register, $src$$constant); 5807 %} 5808 ins_pipe(ialu_reg); 5809 %} 5810 5811 instruct loadConF(regF dst, immF con) %{ 5812 match(Set dst con); 5813 ins_cost(125); 5814 format %{ "movss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 5815 ins_encode %{ 5816 __ movflt($dst$$XMMRegister, $constantaddress($con)); 5817 %} 5818 ins_pipe(pipe_slow); 5819 %} 5820 5821 instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{ 5822 match(Set dst src); 5823 effect(KILL cr); 5824 format %{ "xorq $dst, $src\t# compressed NULL ptr" %} 5825 ins_encode %{ 5826 __ xorq($dst$$Register, $dst$$Register); 5827 %} 5828 ins_pipe(ialu_reg); 5829 %} 5830 5831 instruct loadConN(rRegN dst, immN src) %{ 5832 match(Set dst src); 5833 5834 ins_cost(125); 5835 format %{ "movl $dst, $src\t# compressed ptr" %} 5836 ins_encode %{ 5837 address con = (address)$src$$constant; 5838 if (con == NULL) { 5839 ShouldNotReachHere(); 5840 } else { 5841 __ set_narrow_oop($dst$$Register, (jobject)$src$$constant); 5842 } 5843 %} 5844 ins_pipe(ialu_reg_fat); // XXX 5845 %} 5846 5847 instruct loadConNKlass(rRegN dst, immNKlass src) %{ 5848 match(Set dst src); 5849 5850 ins_cost(125); 5851 format %{ "movl $dst, $src\t# compressed klass ptr" %} 5852 ins_encode %{ 5853 address con = (address)$src$$constant; 5854 if (con == NULL) { 5855 ShouldNotReachHere(); 5856 } else { 5857 __ set_narrow_klass($dst$$Register, (Klass*)$src$$constant); 5858 } 5859 %} 5860 ins_pipe(ialu_reg_fat); // XXX 5861 %} 5862 5863 instruct loadConF0(regF dst, immF0 src) 5864 %{ 5865 match(Set dst src); 5866 ins_cost(100); 5867 5868 format %{ "xorps $dst, $dst\t# float 0.0" %} 5869 ins_encode %{ 5870 __ xorps($dst$$XMMRegister, $dst$$XMMRegister); 5871 %} 5872 ins_pipe(pipe_slow); 5873 %} 5874 5875 // Use the same format since predicate() can not be used here. 
5876 instruct loadConD(regD dst, immD con) %{ 5877 match(Set dst con); 5878 ins_cost(125); 5879 format %{ "movsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 5880 ins_encode %{ 5881 __ movdbl($dst$$XMMRegister, $constantaddress($con)); 5882 %} 5883 ins_pipe(pipe_slow); 5884 %} 5885 5886 instruct loadConD0(regD dst, immD0 src) 5887 %{ 5888 match(Set dst src); 5889 ins_cost(100); 5890 5891 format %{ "xorpd $dst, $dst\t# double 0.0" %} 5892 ins_encode %{ 5893 __ xorpd ($dst$$XMMRegister, $dst$$XMMRegister); 5894 %} 5895 ins_pipe(pipe_slow); 5896 %} 5897 5898 instruct loadSSI(rRegI dst, stackSlotI src) 5899 %{ 5900 match(Set dst src); 5901 5902 ins_cost(125); 5903 format %{ "movl $dst, $src\t# int stk" %} 5904 opcode(0x8B); 5905 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src)); 5906 ins_pipe(ialu_reg_mem); 5907 %} 5908 5909 instruct loadSSL(rRegL dst, stackSlotL src) 5910 %{ 5911 match(Set dst src); 5912 5913 ins_cost(125); 5914 format %{ "movq $dst, $src\t# long stk" %} 5915 opcode(0x8B); 5916 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src)); 5917 ins_pipe(ialu_reg_mem); 5918 %} 5919 5920 instruct loadSSP(rRegP dst, stackSlotP src) 5921 %{ 5922 match(Set dst src); 5923 5924 ins_cost(125); 5925 format %{ "movq $dst, $src\t# ptr stk" %} 5926 opcode(0x8B); 5927 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src)); 5928 ins_pipe(ialu_reg_mem); 5929 %} 5930 5931 instruct loadSSF(regF dst, stackSlotF src) 5932 %{ 5933 match(Set dst src); 5934 5935 ins_cost(125); 5936 format %{ "movss $dst, $src\t# float stk" %} 5937 ins_encode %{ 5938 __ movflt($dst$$XMMRegister, Address(rsp, $src$$disp)); 5939 %} 5940 ins_pipe(pipe_slow); // XXX 5941 %} 5942 5943 // Use the same format since predicate() can not be used here. 5944 instruct loadSSD(regD dst, stackSlotD src) 5945 %{ 5946 match(Set dst src); 5947 5948 ins_cost(125); 5949 format %{ "movsd $dst, $src\t# double stk" %} 5950 ins_encode %{ 5951 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp)); 5952 %} 5953 ins_pipe(pipe_slow); // XXX 5954 %} 5955 5956 // Prefetch instructions for allocation. 5957 // Must be safe to execute with invalid address (cannot fault). 
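// The four variants below are selected purely by the AllocatePrefetchInstr
// flag, as their predicates show.  A hedged summary in plain C++ (illustrative
// only; the real selection happens through the predicates, not a switch):
//
//   // 0 -> prefetchnta, 1 -> prefetcht0, 2 -> prefetcht2, 3 -> prefetchw
//   const char* alloc_prefetch_mnemonic(int allocate_prefetch_instr) {
//     switch (allocate_prefetch_instr) {
//       case 0:  return "prefetchnta";
//       case 1:  return "prefetcht0";
//       case 2:  return "prefetcht2";
//       case 3:  return "prefetchw";
//       default: return "unknown";
//     }
//   }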
5958 5959 instruct prefetchAlloc( memory mem ) %{ 5960 predicate(AllocatePrefetchInstr==3); 5961 match(PrefetchAllocation mem); 5962 ins_cost(125); 5963 5964 format %{ "PREFETCHW $mem\t# Prefetch allocation into level 1 cache and mark modified" %} 5965 ins_encode %{ 5966 __ prefetchw($mem$$Address); 5967 %} 5968 ins_pipe(ialu_mem); 5969 %} 5970 5971 instruct prefetchAllocNTA( memory mem ) %{ 5972 predicate(AllocatePrefetchInstr==0); 5973 match(PrefetchAllocation mem); 5974 ins_cost(125); 5975 5976 format %{ "PREFETCHNTA $mem\t# Prefetch allocation to non-temporal cache for write" %} 5977 ins_encode %{ 5978 __ prefetchnta($mem$$Address); 5979 %} 5980 ins_pipe(ialu_mem); 5981 %} 5982 5983 instruct prefetchAllocT0( memory mem ) %{ 5984 predicate(AllocatePrefetchInstr==1); 5985 match(PrefetchAllocation mem); 5986 ins_cost(125); 5987 5988 format %{ "PREFETCHT0 $mem\t# Prefetch allocation to level 1 and 2 caches for write" %} 5989 ins_encode %{ 5990 __ prefetcht0($mem$$Address); 5991 %} 5992 ins_pipe(ialu_mem); 5993 %} 5994 5995 instruct prefetchAllocT2( memory mem ) %{ 5996 predicate(AllocatePrefetchInstr==2); 5997 match(PrefetchAllocation mem); 5998 ins_cost(125); 5999 6000 format %{ "PREFETCHT2 $mem\t# Prefetch allocation to level 2 cache for write" %} 6001 ins_encode %{ 6002 __ prefetcht2($mem$$Address); 6003 %} 6004 ins_pipe(ialu_mem); 6005 %} 6006 6007 //----------Store Instructions------------------------------------------------- 6008 6009 // Store Byte 6010 instruct storeB(memory mem, rRegI src) 6011 %{ 6012 match(Set mem (StoreB mem src)); 6013 6014 ins_cost(125); // XXX 6015 format %{ "movb $mem, $src\t# byte" %} 6016 ins_encode %{ 6017 __ movb($mem$$Address, $src$$Register); 6018 %} 6019 ins_pipe(ialu_mem_reg); 6020 %} 6021 6022 // Store Char/Short 6023 instruct storeC(memory mem, rRegI src) 6024 %{ 6025 match(Set mem (StoreC mem src)); 6026 6027 ins_cost(125); // XXX 6028 format %{ "movw $mem, $src\t# char/short" %} 6029 ins_encode %{ 6030 __ movw($mem$$Address, $src$$Register); 6031 %} 6032 ins_pipe(ialu_mem_reg); 6033 %} 6034 6035 // Store Integer 6036 instruct storeI(memory mem, rRegI src) 6037 %{ 6038 match(Set mem (StoreI mem src)); 6039 6040 ins_cost(125); // XXX 6041 format %{ "movl $mem, $src\t# int" %} 6042 ins_encode %{ 6043 __ movl($mem$$Address, $src$$Register); 6044 %} 6045 ins_pipe(ialu_mem_reg); 6046 %} 6047 6048 // Store Long 6049 instruct storeL(memory mem, rRegL src) 6050 %{ 6051 match(Set mem (StoreL mem src)); 6052 6053 ins_cost(125); // XXX 6054 format %{ "movq $mem, $src\t# long" %} 6055 ins_encode %{ 6056 __ movq($mem$$Address, $src$$Register); 6057 %} 6058 ins_pipe(ialu_mem_reg); // XXX 6059 %} 6060 6061 // Store Pointer 6062 instruct storeP(memory mem, any_RegP src) 6063 %{ 6064 match(Set mem (StoreP mem src)); 6065 6066 ins_cost(125); // XXX 6067 format %{ "movq $mem, $src\t# ptr" %} 6068 ins_encode %{ 6069 __ movq($mem$$Address, $src$$Register); 6070 %} 6071 ins_pipe(ialu_mem_reg); 6072 %} 6073 6074 instruct storeImmP0(memory mem, immP0 zero) 6075 %{ 6076 predicate(UseCompressedOops && (CompressedOops::base() == NULL)); 6077 match(Set mem (StoreP mem zero)); 6078 6079 ins_cost(125); // XXX 6080 format %{ "movq $mem, R12\t# ptr (R12_heapbase==0)" %} 6081 ins_encode %{ 6082 __ movq($mem$$Address, r12); 6083 %} 6084 ins_pipe(ialu_mem_reg); 6085 %} 6086 6087 // Store NULL Pointer, mark word, or other simple pointer constant. 
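// Note on the immediate form below: only a 31-bit constant can be stored this
// way because the movq mem,imm encoding sign-extends its 32-bit immediate to
// 64 bits, so the constant must be positive and fit in 31 bits for the stored
// pointer bits to come out unchanged.  Illustrative shape of the emitted
// instruction (sketch, not generated code):
//
//   movq qword ptr [mem], imm32   // imm32 is sign-extended to 64 bits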
6088 instruct storeImmP(memory mem, immP31 src) 6089 %{ 6090 match(Set mem (StoreP mem src)); 6091 6092 ins_cost(150); // XXX 6093 format %{ "movq $mem, $src\t# ptr" %} 6094 ins_encode %{ 6095 __ movq($mem$$Address, $src$$constant); 6096 %} 6097 ins_pipe(ialu_mem_imm); 6098 %} 6099 6100 // Store Compressed Pointer 6101 instruct storeN(memory mem, rRegN src) 6102 %{ 6103 match(Set mem (StoreN mem src)); 6104 6105 ins_cost(125); // XXX 6106 format %{ "movl $mem, $src\t# compressed ptr" %} 6107 ins_encode %{ 6108 __ movl($mem$$Address, $src$$Register); 6109 %} 6110 ins_pipe(ialu_mem_reg); 6111 %} 6112 6113 instruct storeNKlass(memory mem, rRegN src) 6114 %{ 6115 match(Set mem (StoreNKlass mem src)); 6116 6117 ins_cost(125); // XXX 6118 format %{ "movl $mem, $src\t# compressed klass ptr" %} 6119 ins_encode %{ 6120 __ movl($mem$$Address, $src$$Register); 6121 %} 6122 ins_pipe(ialu_mem_reg); 6123 %} 6124 6125 instruct storeImmN0(memory mem, immN0 zero) 6126 %{ 6127 predicate(CompressedOops::base() == NULL); 6128 match(Set mem (StoreN mem zero)); 6129 6130 ins_cost(125); // XXX 6131 format %{ "movl $mem, R12\t# compressed ptr (R12_heapbase==0)" %} 6132 ins_encode %{ 6133 __ movl($mem$$Address, r12); 6134 %} 6135 ins_pipe(ialu_mem_reg); 6136 %} 6137 6138 instruct storeImmN(memory mem, immN src) 6139 %{ 6140 match(Set mem (StoreN mem src)); 6141 6142 ins_cost(150); // XXX 6143 format %{ "movl $mem, $src\t# compressed ptr" %} 6144 ins_encode %{ 6145 address con = (address)$src$$constant; 6146 if (con == NULL) { 6147 __ movl($mem$$Address, (int32_t)0); 6148 } else { 6149 __ set_narrow_oop($mem$$Address, (jobject)$src$$constant); 6150 } 6151 %} 6152 ins_pipe(ialu_mem_imm); 6153 %} 6154 6155 instruct storeImmNKlass(memory mem, immNKlass src) 6156 %{ 6157 match(Set mem (StoreNKlass mem src)); 6158 6159 ins_cost(150); // XXX 6160 format %{ "movl $mem, $src\t# compressed klass ptr" %} 6161 ins_encode %{ 6162 __ set_narrow_klass($mem$$Address, (Klass*)$src$$constant); 6163 %} 6164 ins_pipe(ialu_mem_imm); 6165 %} 6166 6167 // Store Integer Immediate 6168 instruct storeImmI0(memory mem, immI_0 zero) 6169 %{ 6170 predicate(UseCompressedOops && (CompressedOops::base() == NULL)); 6171 match(Set mem (StoreI mem zero)); 6172 6173 ins_cost(125); // XXX 6174 format %{ "movl $mem, R12\t# int (R12_heapbase==0)" %} 6175 ins_encode %{ 6176 __ movl($mem$$Address, r12); 6177 %} 6178 ins_pipe(ialu_mem_reg); 6179 %} 6180 6181 instruct storeImmI(memory mem, immI src) 6182 %{ 6183 match(Set mem (StoreI mem src)); 6184 6185 ins_cost(150); 6186 format %{ "movl $mem, $src\t# int" %} 6187 ins_encode %{ 6188 __ movl($mem$$Address, $src$$constant); 6189 %} 6190 ins_pipe(ialu_mem_imm); 6191 %} 6192 6193 // Store Long Immediate 6194 instruct storeImmL0(memory mem, immL0 zero) 6195 %{ 6196 predicate(UseCompressedOops && (CompressedOops::base() == NULL)); 6197 match(Set mem (StoreL mem zero)); 6198 6199 ins_cost(125); // XXX 6200 format %{ "movq $mem, R12\t# long (R12_heapbase==0)" %} 6201 ins_encode %{ 6202 __ movq($mem$$Address, r12); 6203 %} 6204 ins_pipe(ialu_mem_reg); 6205 %} 6206 6207 instruct storeImmL(memory mem, immL32 src) 6208 %{ 6209 match(Set mem (StoreL mem src)); 6210 6211 ins_cost(150); 6212 format %{ "movq $mem, $src\t# long" %} 6213 ins_encode %{ 6214 __ movq($mem$$Address, $src$$constant); 6215 %} 6216 ins_pipe(ialu_mem_imm); 6217 %} 6218 6219 // Store Short/Char Immediate 6220 instruct storeImmC0(memory mem, immI_0 zero) 6221 %{ 6222 predicate(UseCompressedOops && (CompressedOops::base() == NULL)); 6223 match(Set 
        mem (StoreC mem zero));

  ins_cost(125); // XXX
  format %{ "movw $mem, R12\t# short/char (R12_heapbase==0)" %}
  ins_encode %{
    __ movw($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

instruct storeImmI16(memory mem, immI16 src)
%{
  predicate(UseStoreImmI16);
  match(Set mem (StoreC mem src));

  ins_cost(150);
  format %{ "movw $mem, $src\t# short/char" %}
  ins_encode %{
    __ movw($mem$$Address, $src$$constant);
  %}
  ins_pipe(ialu_mem_imm);
%}

// Store Byte Immediate
instruct storeImmB0(memory mem, immI_0 zero)
%{
  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
  match(Set mem (StoreB mem zero));

  ins_cost(125); // XXX
  format %{ "movb $mem, R12\t# byte (R12_heapbase==0)" %}
  ins_encode %{
    __ movb($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

instruct storeImmB(memory mem, immI8 src)
%{
  match(Set mem (StoreB mem src));

  ins_cost(150); // XXX
  format %{ "movb $mem, $src\t# byte" %}
  ins_encode %{
    __ movb($mem$$Address, $src$$constant);
  %}
  ins_pipe(ialu_mem_imm);
%}

// Store CMS card-mark Immediate
instruct storeImmCM0_reg(memory mem, immI_0 zero)
%{
  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
  match(Set mem (StoreCM mem zero));

  ins_cost(125); // XXX
  format %{ "movb $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
  ins_encode %{
    __ movb($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

instruct storeImmCM0(memory mem, immI_0 src)
%{
  match(Set mem (StoreCM mem src));

  ins_cost(150); // XXX
  format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
  ins_encode %{
    __ movb($mem$$Address, $src$$constant);
  %}
  ins_pipe(ialu_mem_imm);
%}

// Store Float
instruct storeF(memory mem, regF src)
%{
  match(Set mem (StoreF mem src));

  ins_cost(95); // XXX
  format %{ "movss $mem, $src\t# float" %}
  ins_encode %{
    __ movflt($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Store immediate Float value (it is faster than a store from an XMM register)
instruct storeF0(memory mem, immF0 zero)
%{
  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
  match(Set mem (StoreF mem zero));

  ins_cost(25); // XXX
  format %{ "movl $mem, R12\t# float 0.
(R12_heapbase==0)" %} 6319 ins_encode %{ 6320 __ movl($mem$$Address, r12); 6321 %} 6322 ins_pipe(ialu_mem_reg); 6323 %} 6324 6325 instruct storeF_imm(memory mem, immF src) 6326 %{ 6327 match(Set mem (StoreF mem src)); 6328 6329 ins_cost(50); 6330 format %{ "movl $mem, $src\t# float" %} 6331 ins_encode %{ 6332 __ movl($mem$$Address, jint_cast($src$$constant)); 6333 %} 6334 ins_pipe(ialu_mem_imm); 6335 %} 6336 6337 // Store Double 6338 instruct storeD(memory mem, regD src) 6339 %{ 6340 match(Set mem (StoreD mem src)); 6341 6342 ins_cost(95); // XXX 6343 format %{ "movsd $mem, $src\t# double" %} 6344 ins_encode %{ 6345 __ movdbl($mem$$Address, $src$$XMMRegister); 6346 %} 6347 ins_pipe(pipe_slow); // XXX 6348 %} 6349 6350 // Store immediate double 0.0 (it is faster than store from XMM register) 6351 instruct storeD0_imm(memory mem, immD0 src) 6352 %{ 6353 predicate(!UseCompressedOops || (CompressedOops::base() != NULL)); 6354 match(Set mem (StoreD mem src)); 6355 6356 ins_cost(50); 6357 format %{ "movq $mem, $src\t# double 0." %} 6358 ins_encode %{ 6359 __ movq($mem$$Address, $src$$constant); 6360 %} 6361 ins_pipe(ialu_mem_imm); 6362 %} 6363 6364 instruct storeD0(memory mem, immD0 zero) 6365 %{ 6366 predicate(UseCompressedOops && (CompressedOops::base() == NULL)); 6367 match(Set mem (StoreD mem zero)); 6368 6369 ins_cost(25); // XXX 6370 format %{ "movq $mem, R12\t# double 0. (R12_heapbase==0)" %} 6371 ins_encode %{ 6372 __ movq($mem$$Address, r12); 6373 %} 6374 ins_pipe(ialu_mem_reg); 6375 %} 6376 6377 instruct storeSSI(stackSlotI dst, rRegI src) 6378 %{ 6379 match(Set dst src); 6380 6381 ins_cost(100); 6382 format %{ "movl $dst, $src\t# int stk" %} 6383 opcode(0x89); 6384 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst)); 6385 ins_pipe( ialu_mem_reg ); 6386 %} 6387 6388 instruct storeSSL(stackSlotL dst, rRegL src) 6389 %{ 6390 match(Set dst src); 6391 6392 ins_cost(100); 6393 format %{ "movq $dst, $src\t# long stk" %} 6394 opcode(0x89); 6395 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst)); 6396 ins_pipe(ialu_mem_reg); 6397 %} 6398 6399 instruct storeSSP(stackSlotP dst, rRegP src) 6400 %{ 6401 match(Set dst src); 6402 6403 ins_cost(100); 6404 format %{ "movq $dst, $src\t# ptr stk" %} 6405 opcode(0x89); 6406 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst)); 6407 ins_pipe(ialu_mem_reg); 6408 %} 6409 6410 instruct storeSSF(stackSlotF dst, regF src) 6411 %{ 6412 match(Set dst src); 6413 6414 ins_cost(95); // XXX 6415 format %{ "movss $dst, $src\t# float stk" %} 6416 ins_encode %{ 6417 __ movflt(Address(rsp, $dst$$disp), $src$$XMMRegister); 6418 %} 6419 ins_pipe(pipe_slow); // XXX 6420 %} 6421 6422 instruct storeSSD(stackSlotD dst, regD src) 6423 %{ 6424 match(Set dst src); 6425 6426 ins_cost(95); // XXX 6427 format %{ "movsd $dst, $src\t# double stk" %} 6428 ins_encode %{ 6429 __ movdbl(Address(rsp, $dst$$disp), $src$$XMMRegister); 6430 %} 6431 ins_pipe(pipe_slow); // XXX 6432 %} 6433 6434 instruct cacheWB(indirect addr) 6435 %{ 6436 predicate(VM_Version::supports_data_cache_line_flush()); 6437 match(CacheWB addr); 6438 6439 ins_cost(100); 6440 format %{"cache wb $addr" %} 6441 ins_encode %{ 6442 assert($addr->index_position() < 0, "should be"); 6443 assert($addr$$disp == 0, "should be"); 6444 __ cache_wb(Address($addr$$base$$Register, 0)); 6445 %} 6446 ins_pipe(pipe_slow); // XXX 6447 %} 6448 6449 instruct cacheWBPreSync() 6450 %{ 6451 predicate(VM_Version::supports_data_cache_line_flush()); 6452 match(CacheWBPreSync); 6453 6454 ins_cost(100); 6455 format 
%{"cache wb presync" %} 6456 ins_encode %{ 6457 __ cache_wbsync(true); 6458 %} 6459 ins_pipe(pipe_slow); // XXX 6460 %} 6461 6462 instruct cacheWBPostSync() 6463 %{ 6464 predicate(VM_Version::supports_data_cache_line_flush()); 6465 match(CacheWBPostSync); 6466 6467 ins_cost(100); 6468 format %{"cache wb postsync" %} 6469 ins_encode %{ 6470 __ cache_wbsync(false); 6471 %} 6472 ins_pipe(pipe_slow); // XXX 6473 %} 6474 6475 //----------BSWAP Instructions------------------------------------------------- 6476 instruct bytes_reverse_int(rRegI dst) %{ 6477 match(Set dst (ReverseBytesI dst)); 6478 6479 format %{ "bswapl $dst" %} 6480 ins_encode %{ 6481 __ bswapl($dst$$Register); 6482 %} 6483 ins_pipe( ialu_reg ); 6484 %} 6485 6486 instruct bytes_reverse_long(rRegL dst) %{ 6487 match(Set dst (ReverseBytesL dst)); 6488 6489 format %{ "bswapq $dst" %} 6490 ins_encode %{ 6491 __ bswapq($dst$$Register); 6492 %} 6493 ins_pipe( ialu_reg); 6494 %} 6495 6496 instruct bytes_reverse_unsigned_short(rRegI dst, rFlagsReg cr) %{ 6497 match(Set dst (ReverseBytesUS dst)); 6498 effect(KILL cr); 6499 6500 format %{ "bswapl $dst\n\t" 6501 "shrl $dst,16\n\t" %} 6502 ins_encode %{ 6503 __ bswapl($dst$$Register); 6504 __ shrl($dst$$Register, 16); 6505 %} 6506 ins_pipe( ialu_reg ); 6507 %} 6508 6509 instruct bytes_reverse_short(rRegI dst, rFlagsReg cr) %{ 6510 match(Set dst (ReverseBytesS dst)); 6511 effect(KILL cr); 6512 6513 format %{ "bswapl $dst\n\t" 6514 "sar $dst,16\n\t" %} 6515 ins_encode %{ 6516 __ bswapl($dst$$Register); 6517 __ sarl($dst$$Register, 16); 6518 %} 6519 ins_pipe( ialu_reg ); 6520 %} 6521 6522 //---------- Zeros Count Instructions ------------------------------------------ 6523 6524 instruct countLeadingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{ 6525 predicate(UseCountLeadingZerosInstruction); 6526 match(Set dst (CountLeadingZerosI src)); 6527 effect(KILL cr); 6528 6529 format %{ "lzcntl $dst, $src\t# count leading zeros (int)" %} 6530 ins_encode %{ 6531 __ lzcntl($dst$$Register, $src$$Register); 6532 %} 6533 ins_pipe(ialu_reg); 6534 %} 6535 6536 instruct countLeadingZerosI_bsr(rRegI dst, rRegI src, rFlagsReg cr) %{ 6537 predicate(!UseCountLeadingZerosInstruction); 6538 match(Set dst (CountLeadingZerosI src)); 6539 effect(KILL cr); 6540 6541 format %{ "bsrl $dst, $src\t# count leading zeros (int)\n\t" 6542 "jnz skip\n\t" 6543 "movl $dst, -1\n" 6544 "skip:\n\t" 6545 "negl $dst\n\t" 6546 "addl $dst, 31" %} 6547 ins_encode %{ 6548 Register Rdst = $dst$$Register; 6549 Register Rsrc = $src$$Register; 6550 Label skip; 6551 __ bsrl(Rdst, Rsrc); 6552 __ jccb(Assembler::notZero, skip); 6553 __ movl(Rdst, -1); 6554 __ bind(skip); 6555 __ negl(Rdst); 6556 __ addl(Rdst, BitsPerInt - 1); 6557 %} 6558 ins_pipe(ialu_reg); 6559 %} 6560 6561 instruct countLeadingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{ 6562 predicate(UseCountLeadingZerosInstruction); 6563 match(Set dst (CountLeadingZerosL src)); 6564 effect(KILL cr); 6565 6566 format %{ "lzcntq $dst, $src\t# count leading zeros (long)" %} 6567 ins_encode %{ 6568 __ lzcntq($dst$$Register, $src$$Register); 6569 %} 6570 ins_pipe(ialu_reg); 6571 %} 6572 6573 instruct countLeadingZerosL_bsr(rRegI dst, rRegL src, rFlagsReg cr) %{ 6574 predicate(!UseCountLeadingZerosInstruction); 6575 match(Set dst (CountLeadingZerosL src)); 6576 effect(KILL cr); 6577 6578 format %{ "bsrq $dst, $src\t# count leading zeros (long)\n\t" 6579 "jnz skip\n\t" 6580 "movl $dst, -1\n" 6581 "skip:\n\t" 6582 "negl $dst\n\t" 6583 "addl $dst, 63" %} 6584 ins_encode %{ 6585 Register Rdst = 
$dst$$Register; 6586 Register Rsrc = $src$$Register; 6587 Label skip; 6588 __ bsrq(Rdst, Rsrc); 6589 __ jccb(Assembler::notZero, skip); 6590 __ movl(Rdst, -1); 6591 __ bind(skip); 6592 __ negl(Rdst); 6593 __ addl(Rdst, BitsPerLong - 1); 6594 %} 6595 ins_pipe(ialu_reg); 6596 %} 6597 6598 instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{ 6599 predicate(UseCountTrailingZerosInstruction); 6600 match(Set dst (CountTrailingZerosI src)); 6601 effect(KILL cr); 6602 6603 format %{ "tzcntl $dst, $src\t# count trailing zeros (int)" %} 6604 ins_encode %{ 6605 __ tzcntl($dst$$Register, $src$$Register); 6606 %} 6607 ins_pipe(ialu_reg); 6608 %} 6609 6610 instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, rFlagsReg cr) %{ 6611 predicate(!UseCountTrailingZerosInstruction); 6612 match(Set dst (CountTrailingZerosI src)); 6613 effect(KILL cr); 6614 6615 format %{ "bsfl $dst, $src\t# count trailing zeros (int)\n\t" 6616 "jnz done\n\t" 6617 "movl $dst, 32\n" 6618 "done:" %} 6619 ins_encode %{ 6620 Register Rdst = $dst$$Register; 6621 Label done; 6622 __ bsfl(Rdst, $src$$Register); 6623 __ jccb(Assembler::notZero, done); 6624 __ movl(Rdst, BitsPerInt); 6625 __ bind(done); 6626 %} 6627 ins_pipe(ialu_reg); 6628 %} 6629 6630 instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{ 6631 predicate(UseCountTrailingZerosInstruction); 6632 match(Set dst (CountTrailingZerosL src)); 6633 effect(KILL cr); 6634 6635 format %{ "tzcntq $dst, $src\t# count trailing zeros (long)" %} 6636 ins_encode %{ 6637 __ tzcntq($dst$$Register, $src$$Register); 6638 %} 6639 ins_pipe(ialu_reg); 6640 %} 6641 6642 instruct countTrailingZerosL_bsf(rRegI dst, rRegL src, rFlagsReg cr) %{ 6643 predicate(!UseCountTrailingZerosInstruction); 6644 match(Set dst (CountTrailingZerosL src)); 6645 effect(KILL cr); 6646 6647 format %{ "bsfq $dst, $src\t# count trailing zeros (long)\n\t" 6648 "jnz done\n\t" 6649 "movl $dst, 64\n" 6650 "done:" %} 6651 ins_encode %{ 6652 Register Rdst = $dst$$Register; 6653 Label done; 6654 __ bsfq(Rdst, $src$$Register); 6655 __ jccb(Assembler::notZero, done); 6656 __ movl(Rdst, BitsPerLong); 6657 __ bind(done); 6658 %} 6659 ins_pipe(ialu_reg); 6660 %} 6661 6662 6663 //---------- Population Count Instructions ------------------------------------- 6664 6665 instruct popCountI(rRegI dst, rRegI src, rFlagsReg cr) %{ 6666 predicate(UsePopCountInstruction); 6667 match(Set dst (PopCountI src)); 6668 effect(KILL cr); 6669 6670 format %{ "popcnt $dst, $src" %} 6671 ins_encode %{ 6672 __ popcntl($dst$$Register, $src$$Register); 6673 %} 6674 ins_pipe(ialu_reg); 6675 %} 6676 6677 instruct popCountI_mem(rRegI dst, memory mem, rFlagsReg cr) %{ 6678 predicate(UsePopCountInstruction); 6679 match(Set dst (PopCountI (LoadI mem))); 6680 effect(KILL cr); 6681 6682 format %{ "popcnt $dst, $mem" %} 6683 ins_encode %{ 6684 __ popcntl($dst$$Register, $mem$$Address); 6685 %} 6686 ins_pipe(ialu_reg); 6687 %} 6688 6689 // Note: Long.bitCount(long) returns an int. 6690 instruct popCountL(rRegI dst, rRegL src, rFlagsReg cr) %{ 6691 predicate(UsePopCountInstruction); 6692 match(Set dst (PopCountL src)); 6693 effect(KILL cr); 6694 6695 format %{ "popcnt $dst, $src" %} 6696 ins_encode %{ 6697 __ popcntq($dst$$Register, $src$$Register); 6698 %} 6699 ins_pipe(ialu_reg); 6700 %} 6701 6702 // Note: Long.bitCount(long) returns an int. 
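// The same note applies to the memory form below: popcntq produces a count in
// the range [0, 64], so an int (rRegI) destination is sufficient even for the
// long flavors.  Unlike the zero-count instructions above there is no BSF/BSR
// style fallback here; these rules exist only when UsePopCountInstruction is
// set (hedged note: the gating of the Java bitCount intrinsic itself happens
// outside this file).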
6703 instruct popCountL_mem(rRegI dst, memory mem, rFlagsReg cr) %{ 6704 predicate(UsePopCountInstruction); 6705 match(Set dst (PopCountL (LoadL mem))); 6706 effect(KILL cr); 6707 6708 format %{ "popcnt $dst, $mem" %} 6709 ins_encode %{ 6710 __ popcntq($dst$$Register, $mem$$Address); 6711 %} 6712 ins_pipe(ialu_reg); 6713 %} 6714 6715 6716 //----------MemBar Instructions----------------------------------------------- 6717 // Memory barrier flavors 6718 6719 instruct membar_acquire() 6720 %{ 6721 match(MemBarAcquire); 6722 match(LoadFence); 6723 ins_cost(0); 6724 6725 size(0); 6726 format %{ "MEMBAR-acquire ! (empty encoding)" %} 6727 ins_encode(); 6728 ins_pipe(empty); 6729 %} 6730 6731 instruct membar_acquire_lock() 6732 %{ 6733 match(MemBarAcquireLock); 6734 ins_cost(0); 6735 6736 size(0); 6737 format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %} 6738 ins_encode(); 6739 ins_pipe(empty); 6740 %} 6741 6742 instruct membar_release() 6743 %{ 6744 match(MemBarRelease); 6745 match(StoreFence); 6746 ins_cost(0); 6747 6748 size(0); 6749 format %{ "MEMBAR-release ! (empty encoding)" %} 6750 ins_encode(); 6751 ins_pipe(empty); 6752 %} 6753 6754 instruct membar_release_lock() 6755 %{ 6756 match(MemBarReleaseLock); 6757 ins_cost(0); 6758 6759 size(0); 6760 format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %} 6761 ins_encode(); 6762 ins_pipe(empty); 6763 %} 6764 6765 instruct membar_volatile(rFlagsReg cr) %{ 6766 match(MemBarVolatile); 6767 effect(KILL cr); 6768 ins_cost(400); 6769 6770 format %{ 6771 $$template 6772 $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile" 6773 %} 6774 ins_encode %{ 6775 __ membar(Assembler::StoreLoad); 6776 %} 6777 ins_pipe(pipe_slow); 6778 %} 6779 6780 instruct unnecessary_membar_volatile() 6781 %{ 6782 match(MemBarVolatile); 6783 predicate(Matcher::post_store_load_barrier(n)); 6784 ins_cost(0); 6785 6786 size(0); 6787 format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %} 6788 ins_encode(); 6789 ins_pipe(empty); 6790 %} 6791 6792 instruct membar_storestore() %{ 6793 match(MemBarStoreStore); 6794 match(StoreStoreFence); 6795 ins_cost(0); 6796 6797 size(0); 6798 format %{ "MEMBAR-storestore (empty encoding)" %} 6799 ins_encode( ); 6800 ins_pipe(empty); 6801 %} 6802 6803 //----------Move Instructions-------------------------------------------------- 6804 6805 instruct castX2P(rRegP dst, rRegL src) 6806 %{ 6807 match(Set dst (CastX2P src)); 6808 6809 format %{ "movq $dst, $src\t# long->ptr" %} 6810 ins_encode %{ 6811 if ($dst$$reg != $src$$reg) { 6812 __ movptr($dst$$Register, $src$$Register); 6813 } 6814 %} 6815 ins_pipe(ialu_reg_reg); // XXX 6816 %} 6817 6818 instruct castP2X(rRegL dst, rRegP src) 6819 %{ 6820 match(Set dst (CastP2X src)); 6821 6822 format %{ "movq $dst, $src\t# ptr -> long" %} 6823 ins_encode %{ 6824 if ($dst$$reg != $src$$reg) { 6825 __ movptr($dst$$Register, $src$$Register); 6826 } 6827 %} 6828 ins_pipe(ialu_reg_reg); // XXX 6829 %} 6830 6831 // Convert oop into int for vectors alignment masking 6832 instruct convP2I(rRegI dst, rRegP src) 6833 %{ 6834 match(Set dst (ConvL2I (CastP2X src))); 6835 6836 format %{ "movl $dst, $src\t# ptr -> int" %} 6837 ins_encode %{ 6838 __ movl($dst$$Register, $src$$Register); 6839 %} 6840 ins_pipe(ialu_reg_reg); // XXX 6841 %} 6842 6843 // Convert compressed oop into int for vectors alignment masking 6844 // in case of 32bit oops (heap < 4Gb). 
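// When CompressedOops::shift() == 0 the heap lies below 4GB (see the comment
// above), so a narrow oop is numerically the low 32 bits of the raw address
// and the masking value can be taken with a plain movl, no decode required.
// Rough equivalence assumed here (sketch):
//
//   (int)(uintptr_t)oop == (int)narrow_oop   // only when shift == 0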
6845 instruct convN2I(rRegI dst, rRegN src) 6846 %{ 6847 predicate(CompressedOops::shift() == 0); 6848 match(Set dst (ConvL2I (CastP2X (DecodeN src)))); 6849 6850 format %{ "movl $dst, $src\t# compressed ptr -> int" %} 6851 ins_encode %{ 6852 __ movl($dst$$Register, $src$$Register); 6853 %} 6854 ins_pipe(ialu_reg_reg); // XXX 6855 %} 6856 6857 // Convert oop pointer into compressed form 6858 instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{ 6859 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); 6860 match(Set dst (EncodeP src)); 6861 effect(KILL cr); 6862 format %{ "encode_heap_oop $dst,$src" %} 6863 ins_encode %{ 6864 Register s = $src$$Register; 6865 Register d = $dst$$Register; 6866 if (s != d) { 6867 __ movq(d, s); 6868 } 6869 __ encode_heap_oop(d); 6870 %} 6871 ins_pipe(ialu_reg_long); 6872 %} 6873 6874 instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{ 6875 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); 6876 match(Set dst (EncodeP src)); 6877 effect(KILL cr); 6878 format %{ "encode_heap_oop_not_null $dst,$src" %} 6879 ins_encode %{ 6880 __ encode_heap_oop_not_null($dst$$Register, $src$$Register); 6881 %} 6882 ins_pipe(ialu_reg_long); 6883 %} 6884 6885 instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{ 6886 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull && 6887 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant); 6888 match(Set dst (DecodeN src)); 6889 effect(KILL cr); 6890 format %{ "decode_heap_oop $dst,$src" %} 6891 ins_encode %{ 6892 Register s = $src$$Register; 6893 Register d = $dst$$Register; 6894 if (s != d) { 6895 __ movq(d, s); 6896 } 6897 __ decode_heap_oop(d); 6898 %} 6899 ins_pipe(ialu_reg_long); 6900 %} 6901 6902 instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{ 6903 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull || 6904 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant); 6905 match(Set dst (DecodeN src)); 6906 effect(KILL cr); 6907 format %{ "decode_heap_oop_not_null $dst,$src" %} 6908 ins_encode %{ 6909 Register s = $src$$Register; 6910 Register d = $dst$$Register; 6911 if (s != d) { 6912 __ decode_heap_oop_not_null(d, s); 6913 } else { 6914 __ decode_heap_oop_not_null(d); 6915 } 6916 %} 6917 ins_pipe(ialu_reg_long); 6918 %} 6919 6920 instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{ 6921 match(Set dst (EncodePKlass src)); 6922 effect(TEMP dst, KILL cr); 6923 format %{ "encode_and_move_klass_not_null $dst,$src" %} 6924 ins_encode %{ 6925 __ encode_and_move_klass_not_null($dst$$Register, $src$$Register); 6926 %} 6927 ins_pipe(ialu_reg_long); 6928 %} 6929 6930 instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{ 6931 match(Set dst (DecodeNKlass src)); 6932 effect(TEMP dst, KILL cr); 6933 format %{ "decode_and_move_klass_not_null $dst,$src" %} 6934 ins_encode %{ 6935 __ decode_and_move_klass_not_null($dst$$Register, $src$$Register); 6936 %} 6937 ins_pipe(ialu_reg_long); 6938 %} 6939 6940 //----------Conditional Move--------------------------------------------------- 6941 // Jump 6942 // dummy instruction for generating temp registers 6943 instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{ 6944 match(Jump (LShiftL switch_val shift)); 6945 ins_cost(350); 6946 predicate(false); 6947 effect(TEMP dest); 6948 6949 format %{ "leaq $dest, [$constantaddress]\n\t" 6950 "jmp [$dest + $switch_val << $shift]\n\t" %} 6951 ins_encode %{ 6952 // We could use jump(ArrayAddress) except that 
    // the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
  match(Jump (AddL (LShiftL switch_val shift) offset));
  ins_cost(350);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
  match(Jump switch_val);
  ins_cost(350);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
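    // A hedged sketch of what the hand-built sequence amounts to, with the
    // allocated TEMP register standing in for the scratch register r10:
    //   lea  dest, constant_table_base      // address of the jump table in the constant section
    //   jmp  qword ptr [dest + switch_val]  // indexed dispatch through the table
    // (the two variants above add a scale and/or a constant offset to the
    // same pattern).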
6995 // Address index(noreg, switch_reg, Address::times_1); 6996 // ArrayAddress dispatch(table, index); 6997 Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1); 6998 __ lea($dest$$Register, $constantaddress); 6999 __ jmp(dispatch); 7000 %} 7001 ins_pipe(pipe_jmp); 7002 %} 7003 7004 // Conditional move 7005 instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop) 7006 %{ 7007 match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); 7008 7009 ins_cost(200); // XXX 7010 format %{ "cmovl$cop $dst, $src\t# signed, int" %} 7011 ins_encode %{ 7012 __ cmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7013 %} 7014 ins_pipe(pipe_cmov_reg); 7015 %} 7016 7017 instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{ 7018 match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); 7019 7020 ins_cost(200); // XXX 7021 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %} 7022 ins_encode %{ 7023 __ cmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7024 %} 7025 ins_pipe(pipe_cmov_reg); 7026 %} 7027 7028 instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{ 7029 match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); 7030 ins_cost(200); 7031 expand %{ 7032 cmovI_regU(cop, cr, dst, src); 7033 %} 7034 %} 7035 7036 // Conditional move 7037 instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{ 7038 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); 7039 7040 ins_cost(250); // XXX 7041 format %{ "cmovl$cop $dst, $src\t# signed, int" %} 7042 ins_encode %{ 7043 __ cmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Address); 7044 %} 7045 ins_pipe(pipe_cmov_mem); 7046 %} 7047 7048 // Conditional move 7049 instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src) 7050 %{ 7051 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); 7052 7053 ins_cost(250); // XXX 7054 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %} 7055 ins_encode %{ 7056 __ cmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Address); 7057 %} 7058 ins_pipe(pipe_cmov_mem); 7059 %} 7060 7061 instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{ 7062 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); 7063 ins_cost(250); 7064 expand %{ 7065 cmovI_memU(cop, cr, dst, src); 7066 %} 7067 %} 7068 7069 // Conditional move 7070 instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop) 7071 %{ 7072 match(Set dst (CMoveN (Binary cop cr) (Binary dst src))); 7073 7074 ins_cost(200); // XXX 7075 format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %} 7076 ins_encode %{ 7077 __ cmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7078 %} 7079 ins_pipe(pipe_cmov_reg); 7080 %} 7081 7082 // Conditional move 7083 instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src) 7084 %{ 7085 match(Set dst (CMoveN (Binary cop cr) (Binary dst src))); 7086 7087 ins_cost(200); // XXX 7088 format %{ "cmovl$cop $dst, $src\t# unsigned, compressed ptr" %} 7089 ins_encode %{ 7090 __ cmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7091 %} 7092 ins_pipe(pipe_cmov_reg); 7093 %} 7094 7095 instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{ 7096 match(Set dst (CMoveN (Binary cop cr) (Binary dst src))); 7097 ins_cost(200); 7098 expand %{ 7099 cmovN_regU(cop, cr, dst, src); 7100 %} 7101 %} 7102 7103 // Conditional move 7104 instruct 
cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop) 7105 %{ 7106 match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); 7107 7108 ins_cost(200); // XXX 7109 format %{ "cmovq$cop $dst, $src\t# signed, ptr" %} 7110 ins_encode %{ 7111 __ cmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7112 %} 7113 ins_pipe(pipe_cmov_reg); // XXX 7114 %} 7115 7116 // Conditional move 7117 instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src) 7118 %{ 7119 match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); 7120 7121 ins_cost(200); // XXX 7122 format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %} 7123 ins_encode %{ 7124 __ cmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7125 %} 7126 ins_pipe(pipe_cmov_reg); // XXX 7127 %} 7128 7129 instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{ 7130 match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); 7131 ins_cost(200); 7132 expand %{ 7133 cmovP_regU(cop, cr, dst, src); 7134 %} 7135 %} 7136 7137 // DISABLED: Requires the ADLC to emit a bottom_type call that 7138 // correctly meets the two pointer arguments; one is an incoming 7139 // register but the other is a memory operand. ALSO appears to 7140 // be buggy with implicit null checks. 7141 // 7142 //// Conditional move 7143 //instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src) 7144 //%{ 7145 // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src)))); 7146 // ins_cost(250); 7147 // format %{ "CMOV$cop $dst,$src\t# ptr" %} 7148 // opcode(0x0F,0x40); 7149 // ins_encode( enc_cmov(cop), reg_mem( dst, src ) ); 7150 // ins_pipe( pipe_cmov_mem ); 7151 //%} 7152 // 7153 //// Conditional move 7154 //instruct cmovP_memU(cmpOpU cop, rFlagsRegU cr, rRegP dst, memory src) 7155 //%{ 7156 // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src)))); 7157 // ins_cost(250); 7158 // format %{ "CMOV$cop $dst,$src\t# ptr" %} 7159 // opcode(0x0F,0x40); 7160 // ins_encode( enc_cmov(cop), reg_mem( dst, src ) ); 7161 // ins_pipe( pipe_cmov_mem ); 7162 //%} 7163 7164 instruct cmovL_reg(cmpOp cop, rFlagsReg cr, rRegL dst, rRegL src) 7165 %{ 7166 match(Set dst (CMoveL (Binary cop cr) (Binary dst src))); 7167 7168 ins_cost(200); // XXX 7169 format %{ "cmovq$cop $dst, $src\t# signed, long" %} 7170 ins_encode %{ 7171 __ cmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7172 %} 7173 ins_pipe(pipe_cmov_reg); // XXX 7174 %} 7175 7176 instruct cmovL_mem(cmpOp cop, rFlagsReg cr, rRegL dst, memory src) 7177 %{ 7178 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src)))); 7179 7180 ins_cost(200); // XXX 7181 format %{ "cmovq$cop $dst, $src\t# signed, long" %} 7182 ins_encode %{ 7183 __ cmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Address); 7184 %} 7185 ins_pipe(pipe_cmov_mem); // XXX 7186 %} 7187 7188 instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src) 7189 %{ 7190 match(Set dst (CMoveL (Binary cop cr) (Binary dst src))); 7191 7192 ins_cost(200); // XXX 7193 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %} 7194 ins_encode %{ 7195 __ cmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Register); 7196 %} 7197 ins_pipe(pipe_cmov_reg); // XXX 7198 %} 7199 7200 instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{ 7201 match(Set dst (CMoveL (Binary cop cr) (Binary dst src))); 7202 ins_cost(200); 7203 expand %{ 7204 cmovL_regU(cop, cr, dst, src); 7205 %} 7206 %} 7207 7208 instruct cmovL_memU(cmpOpU 
cop, rFlagsRegU cr, rRegL dst, memory src) 7209 %{ 7210 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src)))); 7211 7212 ins_cost(200); // XXX 7213 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %} 7214 ins_encode %{ 7215 __ cmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src$$Address); 7216 %} 7217 ins_pipe(pipe_cmov_mem); // XXX 7218 %} 7219 7220 instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{ 7221 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src)))); 7222 ins_cost(200); 7223 expand %{ 7224 cmovL_memU(cop, cr, dst, src); 7225 %} 7226 %} 7227 7228 instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src) 7229 %{ 7230 match(Set dst (CMoveF (Binary cop cr) (Binary dst src))); 7231 7232 ins_cost(200); // XXX 7233 format %{ "jn$cop skip\t# signed cmove float\n\t" 7234 "movss $dst, $src\n" 7235 "skip:" %} 7236 ins_encode %{ 7237 Label Lskip; 7238 // Invert sense of branch from sense of CMOV 7239 __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip); 7240 __ movflt($dst$$XMMRegister, $src$$XMMRegister); 7241 __ bind(Lskip); 7242 %} 7243 ins_pipe(pipe_slow); 7244 %} 7245 7246 // instruct cmovF_mem(cmpOp cop, rFlagsReg cr, regF dst, memory src) 7247 // %{ 7248 // match(Set dst (CMoveF (Binary cop cr) (Binary dst (LoadL src)))); 7249 7250 // ins_cost(200); // XXX 7251 // format %{ "jn$cop skip\t# signed cmove float\n\t" 7252 // "movss $dst, $src\n" 7253 // "skip:" %} 7254 // ins_encode(enc_cmovf_mem_branch(cop, dst, src)); 7255 // ins_pipe(pipe_slow); 7256 // %} 7257 7258 instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src) 7259 %{ 7260 match(Set dst (CMoveF (Binary cop cr) (Binary dst src))); 7261 7262 ins_cost(200); // XXX 7263 format %{ "jn$cop skip\t# unsigned cmove float\n\t" 7264 "movss $dst, $src\n" 7265 "skip:" %} 7266 ins_encode %{ 7267 Label Lskip; 7268 // Invert sense of branch from sense of CMOV 7269 __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip); 7270 __ movflt($dst$$XMMRegister, $src$$XMMRegister); 7271 __ bind(Lskip); 7272 %} 7273 ins_pipe(pipe_slow); 7274 %} 7275 7276 instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{ 7277 match(Set dst (CMoveF (Binary cop cr) (Binary dst src))); 7278 ins_cost(200); 7279 expand %{ 7280 cmovF_regU(cop, cr, dst, src); 7281 %} 7282 %} 7283 7284 instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src) 7285 %{ 7286 match(Set dst (CMoveD (Binary cop cr) (Binary dst src))); 7287 7288 ins_cost(200); // XXX 7289 format %{ "jn$cop skip\t# signed cmove double\n\t" 7290 "movsd $dst, $src\n" 7291 "skip:" %} 7292 ins_encode %{ 7293 Label Lskip; 7294 // Invert sense of branch from sense of CMOV 7295 __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip); 7296 __ movdbl($dst$$XMMRegister, $src$$XMMRegister); 7297 __ bind(Lskip); 7298 %} 7299 ins_pipe(pipe_slow); 7300 %} 7301 7302 instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src) 7303 %{ 7304 match(Set dst (CMoveD (Binary cop cr) (Binary dst src))); 7305 7306 ins_cost(200); // XXX 7307 format %{ "jn$cop skip\t# unsigned cmove double\n\t" 7308 "movsd $dst, $src\n" 7309 "skip:" %} 7310 ins_encode %{ 7311 Label Lskip; 7312 // Invert sense of branch from sense of CMOV 7313 __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip); 7314 __ movdbl($dst$$XMMRegister, $src$$XMMRegister); 7315 __ bind(Lskip); 7316 %} 7317 ins_pipe(pipe_slow); 7318 %} 7319 7320 instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{ 7321 match(Set dst (CMoveD (Binary cop 
cr) (Binary dst src))); 7322 ins_cost(200); 7323 expand %{ 7324 cmovD_regU(cop, cr, dst, src); 7325 %} 7326 %} 7327 7328 //----------Arithmetic Instructions-------------------------------------------- 7329 //----------Addition Instructions---------------------------------------------- 7330 7331 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr) 7332 %{ 7333 match(Set dst (AddI dst src)); 7334 effect(KILL cr); 7335 7336 format %{ "addl $dst, $src\t# int" %} 7337 ins_encode %{ 7338 __ addl($dst$$Register, $src$$Register); 7339 %} 7340 ins_pipe(ialu_reg_reg); 7341 %} 7342 7343 instruct addI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) 7344 %{ 7345 match(Set dst (AddI dst src)); 7346 effect(KILL cr); 7347 7348 format %{ "addl $dst, $src\t# int" %} 7349 ins_encode %{ 7350 __ addl($dst$$Register, $src$$constant); 7351 %} 7352 ins_pipe( ialu_reg ); 7353 %} 7354 7355 instruct addI_rReg_mem(rRegI dst, memory src, rFlagsReg cr) 7356 %{ 7357 match(Set dst (AddI dst (LoadI src))); 7358 effect(KILL cr); 7359 7360 ins_cost(125); // XXX 7361 format %{ "addl $dst, $src\t# int" %} 7362 ins_encode %{ 7363 __ addl($dst$$Register, $src$$Address); 7364 %} 7365 ins_pipe(ialu_reg_mem); 7366 %} 7367 7368 instruct addI_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 7369 %{ 7370 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7371 effect(KILL cr); 7372 7373 ins_cost(150); // XXX 7374 format %{ "addl $dst, $src\t# int" %} 7375 ins_encode %{ 7376 __ addl($dst$$Address, $src$$Register); 7377 %} 7378 ins_pipe(ialu_mem_reg); 7379 %} 7380 7381 instruct addI_mem_imm(memory dst, immI src, rFlagsReg cr) 7382 %{ 7383 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7384 effect(KILL cr); 7385 7386 ins_cost(125); // XXX 7387 format %{ "addl $dst, $src\t# int" %} 7388 ins_encode %{ 7389 __ addl($dst$$Address, $src$$constant); 7390 %} 7391 ins_pipe(ialu_mem_imm); 7392 %} 7393 7394 instruct incI_rReg(rRegI dst, immI_1 src, rFlagsReg cr) 7395 %{ 7396 predicate(UseIncDec); 7397 match(Set dst (AddI dst src)); 7398 effect(KILL cr); 7399 7400 format %{ "incl $dst\t# int" %} 7401 ins_encode %{ 7402 __ incrementl($dst$$Register); 7403 %} 7404 ins_pipe(ialu_reg); 7405 %} 7406 7407 instruct incI_mem(memory dst, immI_1 src, rFlagsReg cr) 7408 %{ 7409 predicate(UseIncDec); 7410 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7411 effect(KILL cr); 7412 7413 ins_cost(125); // XXX 7414 format %{ "incl $dst\t# int" %} 7415 ins_encode %{ 7416 __ incrementl($dst$$Address); 7417 %} 7418 ins_pipe(ialu_mem_imm); 7419 %} 7420 7421 // XXX why does that use AddI 7422 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr) 7423 %{ 7424 predicate(UseIncDec); 7425 match(Set dst (AddI dst src)); 7426 effect(KILL cr); 7427 7428 format %{ "decl $dst\t# int" %} 7429 ins_encode %{ 7430 __ decrementl($dst$$Register); 7431 %} 7432 ins_pipe(ialu_reg); 7433 %} 7434 7435 // XXX why does that use AddI 7436 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr) 7437 %{ 7438 predicate(UseIncDec); 7439 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7440 effect(KILL cr); 7441 7442 ins_cost(125); // XXX 7443 format %{ "decl $dst\t# int" %} 7444 ins_encode %{ 7445 __ decrementl($dst$$Address); 7446 %} 7447 ins_pipe(ialu_mem_imm); 7448 %} 7449 7450 instruct leaI_rReg_immI(rRegI dst, rRegI src0, immI src1) 7451 %{ 7452 match(Set dst (AddI src0 src1)); 7453 7454 ins_cost(110); 7455 format %{ "addr32 leal $dst, [$src0 + $src1]\t# int" %} 7456 ins_encode %{ 7457 __ leal($dst$$Register, Address($src0$$Register, $src1$$constant)); 7458 %} 7459 
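  // lea performs the addition in the address-generation unit and leaves the
  // flags untouched, which is why this rule, unlike the addl-based forms
  // above, needs no effect(KILL cr).  Sketch of the emitted instruction:
  //
  //   leal dst, [src0 + imm]   // dst = src0 + imm, EFLAGS unchanged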
ins_pipe(ialu_reg_reg); 7460 %} 7461 7462 instruct addL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 7463 %{ 7464 match(Set dst (AddL dst src)); 7465 effect(KILL cr); 7466 7467 format %{ "addq $dst, $src\t# long" %} 7468 ins_encode %{ 7469 __ addq($dst$$Register, $src$$Register); 7470 %} 7471 ins_pipe(ialu_reg_reg); 7472 %} 7473 7474 instruct addL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) 7475 %{ 7476 match(Set dst (AddL dst src)); 7477 effect(KILL cr); 7478 7479 format %{ "addq $dst, $src\t# long" %} 7480 ins_encode %{ 7481 __ addq($dst$$Register, $src$$constant); 7482 %} 7483 ins_pipe( ialu_reg ); 7484 %} 7485 7486 instruct addL_rReg_mem(rRegL dst, memory src, rFlagsReg cr) 7487 %{ 7488 match(Set dst (AddL dst (LoadL src))); 7489 effect(KILL cr); 7490 7491 ins_cost(125); // XXX 7492 format %{ "addq $dst, $src\t# long" %} 7493 ins_encode %{ 7494 __ addq($dst$$Register, $src$$Address); 7495 %} 7496 ins_pipe(ialu_reg_mem); 7497 %} 7498 7499 instruct addL_mem_rReg(memory dst, rRegL src, rFlagsReg cr) 7500 %{ 7501 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 7502 effect(KILL cr); 7503 7504 ins_cost(150); // XXX 7505 format %{ "addq $dst, $src\t# long" %} 7506 ins_encode %{ 7507 __ addq($dst$$Address, $src$$Register); 7508 %} 7509 ins_pipe(ialu_mem_reg); 7510 %} 7511 7512 instruct addL_mem_imm(memory dst, immL32 src, rFlagsReg cr) 7513 %{ 7514 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 7515 effect(KILL cr); 7516 7517 ins_cost(125); // XXX 7518 format %{ "addq $dst, $src\t# long" %} 7519 ins_encode %{ 7520 __ addq($dst$$Address, $src$$constant); 7521 %} 7522 ins_pipe(ialu_mem_imm); 7523 %} 7524 7525 instruct incL_rReg(rRegI dst, immL1 src, rFlagsReg cr) 7526 %{ 7527 predicate(UseIncDec); 7528 match(Set dst (AddL dst src)); 7529 effect(KILL cr); 7530 7531 format %{ "incq $dst\t# long" %} 7532 ins_encode %{ 7533 __ incrementq($dst$$Register); 7534 %} 7535 ins_pipe(ialu_reg); 7536 %} 7537 7538 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr) 7539 %{ 7540 predicate(UseIncDec); 7541 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 7542 effect(KILL cr); 7543 7544 ins_cost(125); // XXX 7545 format %{ "incq $dst\t# long" %} 7546 ins_encode %{ 7547 __ incrementq($dst$$Address); 7548 %} 7549 ins_pipe(ialu_mem_imm); 7550 %} 7551 7552 // XXX why does that use AddL 7553 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr) 7554 %{ 7555 predicate(UseIncDec); 7556 match(Set dst (AddL dst src)); 7557 effect(KILL cr); 7558 7559 format %{ "decq $dst\t# long" %} 7560 ins_encode %{ 7561 __ decrementq($dst$$Register); 7562 %} 7563 ins_pipe(ialu_reg); 7564 %} 7565 7566 // XXX why does that use AddL 7567 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr) 7568 %{ 7569 predicate(UseIncDec); 7570 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 7571 effect(KILL cr); 7572 7573 ins_cost(125); // XXX 7574 format %{ "decq $dst\t# long" %} 7575 ins_encode %{ 7576 __ decrementq($dst$$Address); 7577 %} 7578 ins_pipe(ialu_mem_imm); 7579 %} 7580 7581 instruct leaL_rReg_immL(rRegL dst, rRegL src0, immL32 src1) 7582 %{ 7583 match(Set dst (AddL src0 src1)); 7584 7585 ins_cost(110); 7586 format %{ "leaq $dst, [$src0 + $src1]\t# long" %} 7587 ins_encode %{ 7588 __ leaq($dst$$Register, Address($src0$$Register, $src1$$constant)); 7589 %} 7590 ins_pipe(ialu_reg_reg); 7591 %} 7592 7593 instruct addP_rReg(rRegP dst, rRegL src, rFlagsReg cr) 7594 %{ 7595 match(Set dst (AddP dst src)); 7596 effect(KILL cr); 7597 7598 format %{ "addq $dst, $src\t# ptr" %} 7599 ins_encode %{ 7600 __ addq($dst$$Register, 
$src$$Register); 7601 %} 7602 ins_pipe(ialu_reg_reg); 7603 %} 7604 7605 instruct addP_rReg_imm(rRegP dst, immL32 src, rFlagsReg cr) 7606 %{ 7607 match(Set dst (AddP dst src)); 7608 effect(KILL cr); 7609 7610 format %{ "addq $dst, $src\t# ptr" %} 7611 ins_encode %{ 7612 __ addq($dst$$Register, $src$$constant); 7613 %} 7614 ins_pipe( ialu_reg ); 7615 %} 7616 7617 // XXX addP mem ops ???? 7618 7619 instruct leaP_rReg_imm(rRegP dst, rRegP src0, immL32 src1) 7620 %{ 7621 match(Set dst (AddP src0 src1)); 7622 7623 ins_cost(110); 7624 format %{ "leaq $dst, [$src0 + $src1]\t# ptr" %} 7625 ins_encode %{ 7626 __ leaq($dst$$Register, Address($src0$$Register, $src1$$constant)); 7627 %} 7628 ins_pipe(ialu_reg_reg); 7629 %} 7630 7631 instruct checkCastPP(rRegP dst) 7632 %{ 7633 match(Set dst (CheckCastPP dst)); 7634 7635 size(0); 7636 format %{ "# checkcastPP of $dst" %} 7637 ins_encode(/* empty encoding */); 7638 ins_pipe(empty); 7639 %} 7640 7641 instruct castPP(rRegP dst) 7642 %{ 7643 match(Set dst (CastPP dst)); 7644 7645 size(0); 7646 format %{ "# castPP of $dst" %} 7647 ins_encode(/* empty encoding */); 7648 ins_pipe(empty); 7649 %} 7650 7651 instruct castII(rRegI dst) 7652 %{ 7653 match(Set dst (CastII dst)); 7654 7655 size(0); 7656 format %{ "# castII of $dst" %} 7657 ins_encode(/* empty encoding */); 7658 ins_cost(0); 7659 ins_pipe(empty); 7660 %} 7661 7662 instruct castLL(rRegL dst) 7663 %{ 7664 match(Set dst (CastLL dst)); 7665 7666 size(0); 7667 format %{ "# castLL of $dst" %} 7668 ins_encode(/* empty encoding */); 7669 ins_cost(0); 7670 ins_pipe(empty); 7671 %} 7672 7673 instruct castFF(regF dst) 7674 %{ 7675 match(Set dst (CastFF dst)); 7676 7677 size(0); 7678 format %{ "# castFF of $dst" %} 7679 ins_encode(/* empty encoding */); 7680 ins_cost(0); 7681 ins_pipe(empty); 7682 %} 7683 7684 instruct castDD(regD dst) 7685 %{ 7686 match(Set dst (CastDD dst)); 7687 7688 size(0); 7689 format %{ "# castDD of $dst" %} 7690 ins_encode(/* empty encoding */); 7691 ins_cost(0); 7692 ins_pipe(empty); 7693 %} 7694 7695 // LoadP-locked same as a regular LoadP when used with compare-swap 7696 instruct loadPLocked(rRegP dst, memory mem) 7697 %{ 7698 match(Set dst (LoadPLocked mem)); 7699 7700 ins_cost(125); // XXX 7701 format %{ "movq $dst, $mem\t# ptr locked" %} 7702 ins_encode %{ 7703 __ movq($dst$$Register, $mem$$Address); 7704 %} 7705 ins_pipe(ialu_reg_mem); // XXX 7706 %} 7707 7708 // Conditional-store of the updated heap-top. 7709 // Used during allocation of the shared heap. 7710 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel. 7711 7712 instruct storePConditional(memory heap_top_ptr, 7713 rax_RegP oldval, rRegP newval, 7714 rFlagsReg cr) 7715 %{ 7716 predicate(n->as_LoadStore()->barrier_data() == 0); 7717 match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval))); 7718 7719 format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) " 7720 "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %} 7721 ins_encode %{ 7722 __ lock(); 7723 __ cmpxchgq($newval$$Register, $heap_top_ptr$$Address); 7724 %} 7725 ins_pipe(pipe_cmpxchg); 7726 %} 7727 7728 // Conditional-store of an int value. 7729 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG. 
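// The lock-prefixed cmpxchg protocol used here and by the CompareAndSwap /
// CompareAndExchange rules below (sketch of the hardware semantics):
//
//   if ([mem] == rax) { [mem] = newval; ZF = 1; }
//   else              { rax  = [mem];   ZF = 0; }
//
// The boolean-result CAS forms materialize ZF into the result register with
// sete/movzbl; the exchange forms simply leave the witnessed value in rax.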
7730 instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr) 7731 %{ 7732 match(Set cr (StoreIConditional mem (Binary oldval newval))); 7733 effect(KILL oldval); 7734 7735 format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %} 7736 opcode(0x0F, 0xB1); 7737 ins_encode(lock_prefix, 7738 REX_reg_mem(newval, mem), 7739 OpcP, OpcS, 7740 reg_mem(newval, mem)); 7741 ins_pipe(pipe_cmpxchg); 7742 %} 7743 7744 // Conditional-store of a long value. 7745 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG. 7746 instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr) 7747 %{ 7748 match(Set cr (StoreLConditional mem (Binary oldval newval))); 7749 effect(KILL oldval); 7750 7751 format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %} 7752 ins_encode %{ 7753 __ lock(); 7754 __ cmpxchgq($newval$$Register, $mem$$Address); 7755 %} 7756 ins_pipe(pipe_cmpxchg); 7757 %} 7758 7759 7760 // XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them 7761 instruct compareAndSwapP(rRegI res, 7762 memory mem_ptr, 7763 rax_RegP oldval, rRegP newval, 7764 rFlagsReg cr) 7765 %{ 7766 predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0); 7767 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); 7768 match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval))); 7769 effect(KILL cr, KILL oldval); 7770 7771 format %{ "cmpxchgq $mem_ptr,$newval\t# " 7772 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" 7773 "sete $res\n\t" 7774 "movzbl $res, $res" %} 7775 ins_encode %{ 7776 __ lock(); 7777 __ cmpxchgq($newval$$Register, $mem_ptr$$Address); 7778 __ sete($res$$Register); 7779 __ movzbl($res$$Register, $res$$Register); 7780 %} 7781 ins_pipe( pipe_cmpxchg ); 7782 %} 7783 7784 instruct compareAndSwapL(rRegI res, 7785 memory mem_ptr, 7786 rax_RegL oldval, rRegL newval, 7787 rFlagsReg cr) 7788 %{ 7789 predicate(VM_Version::supports_cx8()); 7790 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval))); 7791 match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval))); 7792 effect(KILL cr, KILL oldval); 7793 7794 format %{ "cmpxchgq $mem_ptr,$newval\t# " 7795 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" 7796 "sete $res\n\t" 7797 "movzbl $res, $res" %} 7798 ins_encode %{ 7799 __ lock(); 7800 __ cmpxchgq($newval$$Register, $mem_ptr$$Address); 7801 __ sete($res$$Register); 7802 __ movzbl($res$$Register, $res$$Register); 7803 %} 7804 ins_pipe( pipe_cmpxchg ); 7805 %} 7806 7807 instruct compareAndSwapI(rRegI res, 7808 memory mem_ptr, 7809 rax_RegI oldval, rRegI newval, 7810 rFlagsReg cr) 7811 %{ 7812 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); 7813 match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval))); 7814 effect(KILL cr, KILL oldval); 7815 7816 format %{ "cmpxchgl $mem_ptr,$newval\t# " 7817 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" 7818 "sete $res\n\t" 7819 "movzbl $res, $res" %} 7820 ins_encode %{ 7821 __ lock(); 7822 __ cmpxchgl($newval$$Register, $mem_ptr$$Address); 7823 __ sete($res$$Register); 7824 __ movzbl($res$$Register, $res$$Register); 7825 %} 7826 ins_pipe( pipe_cmpxchg ); 7827 %} 7828 7829 instruct compareAndSwapB(rRegI res, 7830 memory mem_ptr, 7831 rax_RegI oldval, rRegI newval, 7832 rFlagsReg cr) 7833 %{ 7834 match(Set res (CompareAndSwapB mem_ptr (Binary oldval newval))); 7835 match(Set res (WeakCompareAndSwapB mem_ptr (Binary oldval 
newval))); 7836 effect(KILL cr, KILL oldval); 7837 7838 format %{ "cmpxchgb $mem_ptr,$newval\t# " 7839 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" 7840 "sete $res\n\t" 7841 "movzbl $res, $res" %} 7842 ins_encode %{ 7843 __ lock(); 7844 __ cmpxchgb($newval$$Register, $mem_ptr$$Address); 7845 __ sete($res$$Register); 7846 __ movzbl($res$$Register, $res$$Register); 7847 %} 7848 ins_pipe( pipe_cmpxchg ); 7849 %} 7850 7851 instruct compareAndSwapS(rRegI res, 7852 memory mem_ptr, 7853 rax_RegI oldval, rRegI newval, 7854 rFlagsReg cr) 7855 %{ 7856 match(Set res (CompareAndSwapS mem_ptr (Binary oldval newval))); 7857 match(Set res (WeakCompareAndSwapS mem_ptr (Binary oldval newval))); 7858 effect(KILL cr, KILL oldval); 7859 7860 format %{ "cmpxchgw $mem_ptr,$newval\t# " 7861 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" 7862 "sete $res\n\t" 7863 "movzbl $res, $res" %} 7864 ins_encode %{ 7865 __ lock(); 7866 __ cmpxchgw($newval$$Register, $mem_ptr$$Address); 7867 __ sete($res$$Register); 7868 __ movzbl($res$$Register, $res$$Register); 7869 %} 7870 ins_pipe( pipe_cmpxchg ); 7871 %} 7872 7873 instruct compareAndSwapN(rRegI res, 7874 memory mem_ptr, 7875 rax_RegN oldval, rRegN newval, 7876 rFlagsReg cr) %{ 7877 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); 7878 match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval))); 7879 effect(KILL cr, KILL oldval); 7880 7881 format %{ "cmpxchgl $mem_ptr,$newval\t# " 7882 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" 7883 "sete $res\n\t" 7884 "movzbl $res, $res" %} 7885 ins_encode %{ 7886 __ lock(); 7887 __ cmpxchgl($newval$$Register, $mem_ptr$$Address); 7888 __ sete($res$$Register); 7889 __ movzbl($res$$Register, $res$$Register); 7890 %} 7891 ins_pipe( pipe_cmpxchg ); 7892 %} 7893 7894 instruct compareAndExchangeB( 7895 memory mem_ptr, 7896 rax_RegI oldval, rRegI newval, 7897 rFlagsReg cr) 7898 %{ 7899 match(Set oldval (CompareAndExchangeB mem_ptr (Binary oldval newval))); 7900 effect(KILL cr); 7901 7902 format %{ "cmpxchgb $mem_ptr,$newval\t# " 7903 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} 7904 ins_encode %{ 7905 __ lock(); 7906 __ cmpxchgb($newval$$Register, $mem_ptr$$Address); 7907 %} 7908 ins_pipe( pipe_cmpxchg ); 7909 %} 7910 7911 instruct compareAndExchangeS( 7912 memory mem_ptr, 7913 rax_RegI oldval, rRegI newval, 7914 rFlagsReg cr) 7915 %{ 7916 match(Set oldval (CompareAndExchangeS mem_ptr (Binary oldval newval))); 7917 effect(KILL cr); 7918 7919 format %{ "cmpxchgw $mem_ptr,$newval\t# " 7920 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} 7921 ins_encode %{ 7922 __ lock(); 7923 __ cmpxchgw($newval$$Register, $mem_ptr$$Address); 7924 %} 7925 ins_pipe( pipe_cmpxchg ); 7926 %} 7927 7928 instruct compareAndExchangeI( 7929 memory mem_ptr, 7930 rax_RegI oldval, rRegI newval, 7931 rFlagsReg cr) 7932 %{ 7933 match(Set oldval (CompareAndExchangeI mem_ptr (Binary oldval newval))); 7934 effect(KILL cr); 7935 7936 format %{ "cmpxchgl $mem_ptr,$newval\t# " 7937 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} 7938 ins_encode %{ 7939 __ lock(); 7940 __ cmpxchgl($newval$$Register, $mem_ptr$$Address); 7941 %} 7942 ins_pipe( pipe_cmpxchg ); 7943 %} 7944 7945 instruct compareAndExchangeL( 7946 memory mem_ptr, 7947 rax_RegL oldval, rRegL newval, 7948 rFlagsReg cr) 7949 %{ 7950 predicate(VM_Version::supports_cx8()); 7951 match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval))); 7952 effect(KILL cr); 7953 7954 format %{ "cmpxchgq $mem_ptr,$newval\t# " 
7955 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} 7956 ins_encode %{ 7957 __ lock(); 7958 __ cmpxchgq($newval$$Register, $mem_ptr$$Address); 7959 %} 7960 ins_pipe( pipe_cmpxchg ); 7961 %} 7962 7963 instruct compareAndExchangeN( 7964 memory mem_ptr, 7965 rax_RegN oldval, rRegN newval, 7966 rFlagsReg cr) %{ 7967 match(Set oldval (CompareAndExchangeN mem_ptr (Binary oldval newval))); 7968 effect(KILL cr); 7969 7970 format %{ "cmpxchgl $mem_ptr,$newval\t# " 7971 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} 7972 ins_encode %{ 7973 __ lock(); 7974 __ cmpxchgl($newval$$Register, $mem_ptr$$Address); 7975 %} 7976 ins_pipe( pipe_cmpxchg ); 7977 %} 7978 7979 instruct compareAndExchangeP( 7980 memory mem_ptr, 7981 rax_RegP oldval, rRegP newval, 7982 rFlagsReg cr) 7983 %{ 7984 predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0); 7985 match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval))); 7986 effect(KILL cr); 7987 7988 format %{ "cmpxchgq $mem_ptr,$newval\t# " 7989 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} 7990 ins_encode %{ 7991 __ lock(); 7992 __ cmpxchgq($newval$$Register, $mem_ptr$$Address); 7993 %} 7994 ins_pipe( pipe_cmpxchg ); 7995 %} 7996 7997 instruct xaddB_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{ 7998 predicate(n->as_LoadStore()->result_not_used()); 7999 match(Set dummy (GetAndAddB mem add)); 8000 effect(KILL cr); 8001 format %{ "ADDB [$mem],$add" %} 8002 ins_encode %{ 8003 __ lock(); 8004 __ addb($mem$$Address, $add$$constant); 8005 %} 8006 ins_pipe( pipe_cmpxchg ); 8007 %} 8008 8009 instruct xaddB( memory mem, rRegI newval, rFlagsReg cr) %{ 8010 match(Set newval (GetAndAddB mem newval)); 8011 effect(KILL cr); 8012 format %{ "XADDB [$mem],$newval" %} 8013 ins_encode %{ 8014 __ lock(); 8015 __ xaddb($mem$$Address, $newval$$Register); 8016 %} 8017 ins_pipe( pipe_cmpxchg ); 8018 %} 8019 8020 instruct xaddS_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{ 8021 predicate(n->as_LoadStore()->result_not_used()); 8022 match(Set dummy (GetAndAddS mem add)); 8023 effect(KILL cr); 8024 format %{ "ADDW [$mem],$add" %} 8025 ins_encode %{ 8026 __ lock(); 8027 __ addw($mem$$Address, $add$$constant); 8028 %} 8029 ins_pipe( pipe_cmpxchg ); 8030 %} 8031 8032 instruct xaddS( memory mem, rRegI newval, rFlagsReg cr) %{ 8033 match(Set newval (GetAndAddS mem newval)); 8034 effect(KILL cr); 8035 format %{ "XADDW [$mem],$newval" %} 8036 ins_encode %{ 8037 __ lock(); 8038 __ xaddw($mem$$Address, $newval$$Register); 8039 %} 8040 ins_pipe( pipe_cmpxchg ); 8041 %} 8042 8043 instruct xaddI_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{ 8044 predicate(n->as_LoadStore()->result_not_used()); 8045 match(Set dummy (GetAndAddI mem add)); 8046 effect(KILL cr); 8047 format %{ "ADDL [$mem],$add" %} 8048 ins_encode %{ 8049 __ lock(); 8050 __ addl($mem$$Address, $add$$constant); 8051 %} 8052 ins_pipe( pipe_cmpxchg ); 8053 %} 8054 8055 instruct xaddI( memory mem, rRegI newval, rFlagsReg cr) %{ 8056 match(Set newval (GetAndAddI mem newval)); 8057 effect(KILL cr); 8058 format %{ "XADDL [$mem],$newval" %} 8059 ins_encode %{ 8060 __ lock(); 8061 __ xaddl($mem$$Address, $newval$$Register); 8062 %} 8063 ins_pipe( pipe_cmpxchg ); 8064 %} 8065 8066 instruct xaddL_no_res( memory mem, Universe dummy, immL32 add, rFlagsReg cr) %{ 8067 predicate(n->as_LoadStore()->result_not_used()); 8068 match(Set dummy (GetAndAddL mem add)); 8069 effect(KILL cr); 8070 format %{ "ADDQ [$mem],$add" %} 8071 ins_encode 
%{ 8072 __ lock(); 8073 __ addq($mem$$Address, $add$$constant); 8074 %} 8075 ins_pipe( pipe_cmpxchg ); 8076 %} 8077 8078 instruct xaddL( memory mem, rRegL newval, rFlagsReg cr) %{ 8079 match(Set newval (GetAndAddL mem newval)); 8080 effect(KILL cr); 8081 format %{ "XADDQ [$mem],$newval" %} 8082 ins_encode %{ 8083 __ lock(); 8084 __ xaddq($mem$$Address, $newval$$Register); 8085 %} 8086 ins_pipe( pipe_cmpxchg ); 8087 %} 8088 8089 instruct xchgB( memory mem, rRegI newval) %{ 8090 match(Set newval (GetAndSetB mem newval)); 8091 format %{ "XCHGB $newval,[$mem]" %} 8092 ins_encode %{ 8093 __ xchgb($newval$$Register, $mem$$Address); 8094 %} 8095 ins_pipe( pipe_cmpxchg ); 8096 %} 8097 8098 instruct xchgS( memory mem, rRegI newval) %{ 8099 match(Set newval (GetAndSetS mem newval)); 8100 format %{ "XCHGW $newval,[$mem]" %} 8101 ins_encode %{ 8102 __ xchgw($newval$$Register, $mem$$Address); 8103 %} 8104 ins_pipe( pipe_cmpxchg ); 8105 %} 8106 8107 instruct xchgI( memory mem, rRegI newval) %{ 8108 match(Set newval (GetAndSetI mem newval)); 8109 format %{ "XCHGL $newval,[$mem]" %} 8110 ins_encode %{ 8111 __ xchgl($newval$$Register, $mem$$Address); 8112 %} 8113 ins_pipe( pipe_cmpxchg ); 8114 %} 8115 8116 instruct xchgL( memory mem, rRegL newval) %{ 8117 match(Set newval (GetAndSetL mem newval)); 8118 format %{ "XCHGQ $newval,[$mem]" %} 8119 ins_encode %{ 8120 __ xchgq($newval$$Register, $mem$$Address); 8121 %} 8122 ins_pipe( pipe_cmpxchg ); 8123 %} 8124 8125 instruct xchgP( memory mem, rRegP newval) %{ 8126 match(Set newval (GetAndSetP mem newval)); 8127 predicate(n->as_LoadStore()->barrier_data() == 0); 8128 format %{ "XCHGQ $newval,[$mem]" %} 8129 ins_encode %{ 8130 __ xchgq($newval$$Register, $mem$$Address); 8131 %} 8132 ins_pipe( pipe_cmpxchg ); 8133 %} 8134 8135 instruct xchgN( memory mem, rRegN newval) %{ 8136 match(Set newval (GetAndSetN mem newval)); 8137 format %{ "XCHGL $newval,[$mem]" %} 8138 ins_encode %{ 8139 __ xchgl($newval$$Register, $mem$$Address); 8140 %} 8141 ins_pipe( pipe_cmpxchg ); 8142 %} 8143 8144 //----------Abs Instructions------------------------------------------- 8145 8146 // Integer Absolute Instructions 8147 instruct absI_rReg(rRegI dst, rRegI src, rRegI tmp, rFlagsReg cr) 8148 %{ 8149 match(Set dst (AbsI src)); 8150 effect(TEMP dst, TEMP tmp, KILL cr); 8151 format %{ "movl $tmp, $src\n\t" 8152 "sarl $tmp, 31\n\t" 8153 "movl $dst, $src\n\t" 8154 "xorl $dst, $tmp\n\t" 8155 "subl $dst, $tmp\n" 8156 %} 8157 ins_encode %{ 8158 __ movl($tmp$$Register, $src$$Register); 8159 __ sarl($tmp$$Register, 31); 8160 __ movl($dst$$Register, $src$$Register); 8161 __ xorl($dst$$Register, $tmp$$Register); 8162 __ subl($dst$$Register, $tmp$$Register); 8163 %} 8164 8165 ins_pipe(ialu_reg_reg); 8166 %} 8167 8168 // Long Absolute Instructions 8169 instruct absL_rReg(rRegL dst, rRegL src, rRegL tmp, rFlagsReg cr) 8170 %{ 8171 match(Set dst (AbsL src)); 8172 effect(TEMP dst, TEMP tmp, KILL cr); 8173 format %{ "movq $tmp, $src\n\t" 8174 "sarq $tmp, 63\n\t" 8175 "movq $dst, $src\n\t" 8176 "xorq $dst, $tmp\n\t" 8177 "subq $dst, $tmp\n" 8178 %} 8179 ins_encode %{ 8180 __ movq($tmp$$Register, $src$$Register); 8181 __ sarq($tmp$$Register, 63); 8182 __ movq($dst$$Register, $src$$Register); 8183 __ xorq($dst$$Register, $tmp$$Register); 8184 __ subq($dst$$Register, $tmp$$Register); 8185 %} 8186 8187 ins_pipe(ialu_reg_reg); 8188 %} 8189 8190 //----------Subtraction Instructions------------------------------------------- 8191 8192 // Integer Subtraction Instructions 8193 instruct subI_rReg(rRegI dst, rRegI
src, rFlagsReg cr) 8194 %{ 8195 match(Set dst (SubI dst src)); 8196 effect(KILL cr); 8197 8198 format %{ "subl $dst, $src\t# int" %} 8199 ins_encode %{ 8200 __ subl($dst$$Register, $src$$Register); 8201 %} 8202 ins_pipe(ialu_reg_reg); 8203 %} 8204 8205 instruct subI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) 8206 %{ 8207 match(Set dst (SubI dst src)); 8208 effect(KILL cr); 8209 8210 format %{ "subl $dst, $src\t# int" %} 8211 ins_encode %{ 8212 __ subl($dst$$Register, $src$$constant); 8213 %} 8214 ins_pipe(ialu_reg); 8215 %} 8216 8217 instruct subI_rReg_mem(rRegI dst, memory src, rFlagsReg cr) 8218 %{ 8219 match(Set dst (SubI dst (LoadI src))); 8220 effect(KILL cr); 8221 8222 ins_cost(125); 8223 format %{ "subl $dst, $src\t# int" %} 8224 ins_encode %{ 8225 __ subl($dst$$Register, $src$$Address); 8226 %} 8227 ins_pipe(ialu_reg_mem); 8228 %} 8229 8230 instruct subI_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 8231 %{ 8232 match(Set dst (StoreI dst (SubI (LoadI dst) src))); 8233 effect(KILL cr); 8234 8235 ins_cost(150); 8236 format %{ "subl $dst, $src\t# int" %} 8237 ins_encode %{ 8238 __ subl($dst$$Address, $src$$Register); 8239 %} 8240 ins_pipe(ialu_mem_reg); 8241 %} 8242 8243 instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr) 8244 %{ 8245 match(Set dst (StoreI dst (SubI (LoadI dst) src))); 8246 effect(KILL cr); 8247 8248 ins_cost(125); // XXX 8249 format %{ "subl $dst, $src\t# int" %} 8250 ins_encode %{ 8251 __ subl($dst$$Address, $src$$constant); 8252 %} 8253 ins_pipe(ialu_mem_imm); 8254 %} 8255 8256 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 8257 %{ 8258 match(Set dst (SubL dst src)); 8259 effect(KILL cr); 8260 8261 format %{ "subq $dst, $src\t# long" %} 8262 ins_encode %{ 8263 __ subq($dst$$Register, $src$$Register); 8264 %} 8265 ins_pipe(ialu_reg_reg); 8266 %} 8267 8268 instruct subL_rReg_imm(rRegI dst, immL32 src, rFlagsReg cr) 8269 %{ 8270 match(Set dst (SubL dst src)); 8271 effect(KILL cr); 8272 8273 format %{ "subq $dst, $src\t# long" %} 8274 ins_encode %{ 8275 __ subq($dst$$Register, $src$$constant); 8276 %} 8277 ins_pipe(ialu_reg); 8278 %} 8279 8280 instruct subL_rReg_mem(rRegL dst, memory src, rFlagsReg cr) 8281 %{ 8282 match(Set dst (SubL dst (LoadL src))); 8283 effect(KILL cr); 8284 8285 ins_cost(125); 8286 format %{ "subq $dst, $src\t# long" %} 8287 ins_encode %{ 8288 __ subq($dst$$Register, $src$$Address); 8289 %} 8290 ins_pipe(ialu_reg_mem); 8291 %} 8292 8293 instruct subL_mem_rReg(memory dst, rRegL src, rFlagsReg cr) 8294 %{ 8295 match(Set dst (StoreL dst (SubL (LoadL dst) src))); 8296 effect(KILL cr); 8297 8298 ins_cost(150); 8299 format %{ "subq $dst, $src\t# long" %} 8300 ins_encode %{ 8301 __ subq($dst$$Address, $src$$Register); 8302 %} 8303 ins_pipe(ialu_mem_reg); 8304 %} 8305 8306 instruct subL_mem_imm(memory dst, immL32 src, rFlagsReg cr) 8307 %{ 8308 match(Set dst (StoreL dst (SubL (LoadL dst) src))); 8309 effect(KILL cr); 8310 8311 ins_cost(125); // XXX 8312 format %{ "subq $dst, $src\t# long" %} 8313 ins_encode %{ 8314 __ subq($dst$$Address, $src$$constant); 8315 %} 8316 ins_pipe(ialu_mem_imm); 8317 %} 8318 8319 // Subtract from a pointer 8320 // XXX hmpf??? 
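// There is no SubP ideal node: a pointer-minus-int such as "p = p - i" reaches
// the matcher roughly as AddP(p, SubI(0, i)), which the rule below folds back
// into a single subtraction. Illustrative sketch only, not generated code:
//
//   char* p; int i;
//   p = p - i;        // ideal graph, roughly:  AddP(p, SubI(0, i))
//                     // matched below as:      subq p_reg, i_reg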
8321 instruct subP_rReg(rRegP dst, rRegI src, immI_0 zero, rFlagsReg cr) 8322 %{ 8323 match(Set dst (AddP dst (SubI zero src))); 8324 effect(KILL cr); 8325 8326 format %{ "subq $dst, $src\t# ptr - int" %} 8327 opcode(0x2B); 8328 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src)); 8329 ins_pipe(ialu_reg_reg); 8330 %} 8331 8332 instruct negI_rReg(rRegI dst, immI_0 zero, rFlagsReg cr) 8333 %{ 8334 match(Set dst (SubI zero dst)); 8335 effect(KILL cr); 8336 8337 format %{ "negl $dst\t# int" %} 8338 ins_encode %{ 8339 __ negl($dst$$Register); 8340 %} 8341 ins_pipe(ialu_reg); 8342 %} 8343 8344 instruct negI_rReg_2(rRegI dst, rFlagsReg cr) 8345 %{ 8346 match(Set dst (NegI dst)); 8347 effect(KILL cr); 8348 8349 format %{ "negl $dst\t# int" %} 8350 ins_encode %{ 8351 __ negl($dst$$Register); 8352 %} 8353 ins_pipe(ialu_reg); 8354 %} 8355 8356 instruct negI_mem(memory dst, immI_0 zero, rFlagsReg cr) 8357 %{ 8358 match(Set dst (StoreI dst (SubI zero (LoadI dst)))); 8359 effect(KILL cr); 8360 8361 format %{ "negl $dst\t# int" %} 8362 ins_encode %{ 8363 __ negl($dst$$Address); 8364 %} 8365 ins_pipe(ialu_reg); 8366 %} 8367 8368 instruct negL_rReg(rRegL dst, immL0 zero, rFlagsReg cr) 8369 %{ 8370 match(Set dst (SubL zero dst)); 8371 effect(KILL cr); 8372 8373 format %{ "negq $dst\t# long" %} 8374 ins_encode %{ 8375 __ negq($dst$$Register); 8376 %} 8377 ins_pipe(ialu_reg); 8378 %} 8379 8380 instruct negL_rReg_2(rRegL dst, rFlagsReg cr) 8381 %{ 8382 match(Set dst (NegL dst)); 8383 effect(KILL cr); 8384 8385 format %{ "negq $dst\t# int" %} 8386 ins_encode %{ 8387 __ negq($dst$$Register); 8388 %} 8389 ins_pipe(ialu_reg); 8390 %} 8391 8392 instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr) 8393 %{ 8394 match(Set dst (StoreL dst (SubL zero (LoadL dst)))); 8395 effect(KILL cr); 8396 8397 format %{ "negq $dst\t# long" %} 8398 ins_encode %{ 8399 __ negq($dst$$Address); 8400 %} 8401 ins_pipe(ialu_reg); 8402 %} 8403 8404 //----------Multiplication/Division Instructions------------------------------- 8405 // Integer Multiplication Instructions 8406 // Multiply Register 8407 8408 instruct mulI_rReg(rRegI dst, rRegI src, rFlagsReg cr) 8409 %{ 8410 match(Set dst (MulI dst src)); 8411 effect(KILL cr); 8412 8413 ins_cost(300); 8414 format %{ "imull $dst, $src\t# int" %} 8415 ins_encode %{ 8416 __ imull($dst$$Register, $src$$Register); 8417 %} 8418 ins_pipe(ialu_reg_reg_alu0); 8419 %} 8420 8421 instruct mulI_rReg_imm(rRegI dst, rRegI src, immI imm, rFlagsReg cr) 8422 %{ 8423 match(Set dst (MulI src imm)); 8424 effect(KILL cr); 8425 8426 ins_cost(300); 8427 format %{ "imull $dst, $src, $imm\t# int" %} 8428 ins_encode %{ 8429 __ imull($dst$$Register, $src$$Register, $imm$$constant); 8430 %} 8431 ins_pipe(ialu_reg_reg_alu0); 8432 %} 8433 8434 instruct mulI_mem(rRegI dst, memory src, rFlagsReg cr) 8435 %{ 8436 match(Set dst (MulI dst (LoadI src))); 8437 effect(KILL cr); 8438 8439 ins_cost(350); 8440 format %{ "imull $dst, $src\t# int" %} 8441 ins_encode %{ 8442 __ imull($dst$$Register, $src$$Address); 8443 %} 8444 ins_pipe(ialu_reg_mem_alu0); 8445 %} 8446 8447 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, rFlagsReg cr) 8448 %{ 8449 match(Set dst (MulI (LoadI src) imm)); 8450 effect(KILL cr); 8451 8452 ins_cost(300); 8453 format %{ "imull $dst, $src, $imm\t# int" %} 8454 ins_encode %{ 8455 __ imull($dst$$Register, $src$$Address, $imm$$constant); 8456 %} 8457 ins_pipe(ialu_reg_mem_alu0); 8458 %} 8459 8460 instruct mulAddS2I_rReg(rRegI dst, rRegI src1, rRegI src2, rRegI src3, rFlagsReg cr) 8461 %{ 8462 
match(Set dst (MulAddS2I (Binary dst src1) (Binary src2 src3))); 8463 effect(KILL cr, KILL src2); 8464 8465 expand %{ mulI_rReg(dst, src1, cr); 8466 mulI_rReg(src2, src3, cr); 8467 addI_rReg(dst, src2, cr); %} 8468 %} 8469 8470 instruct mulL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 8471 %{ 8472 match(Set dst (MulL dst src)); 8473 effect(KILL cr); 8474 8475 ins_cost(300); 8476 format %{ "imulq $dst, $src\t# long" %} 8477 ins_encode %{ 8478 __ imulq($dst$$Register, $src$$Register); 8479 %} 8480 ins_pipe(ialu_reg_reg_alu0); 8481 %} 8482 8483 instruct mulL_rReg_imm(rRegL dst, rRegL src, immL32 imm, rFlagsReg cr) 8484 %{ 8485 match(Set dst (MulL src imm)); 8486 effect(KILL cr); 8487 8488 ins_cost(300); 8489 format %{ "imulq $dst, $src, $imm\t# long" %} 8490 ins_encode %{ 8491 __ imulq($dst$$Register, $src$$Register, $imm$$constant); 8492 %} 8493 ins_pipe(ialu_reg_reg_alu0); 8494 %} 8495 8496 instruct mulL_mem(rRegL dst, memory src, rFlagsReg cr) 8497 %{ 8498 match(Set dst (MulL dst (LoadL src))); 8499 effect(KILL cr); 8500 8501 ins_cost(350); 8502 format %{ "imulq $dst, $src\t# long" %} 8503 ins_encode %{ 8504 __ imulq($dst$$Register, $src$$Address); 8505 %} 8506 ins_pipe(ialu_reg_mem_alu0); 8507 %} 8508 8509 instruct mulL_mem_imm(rRegL dst, memory src, immL32 imm, rFlagsReg cr) 8510 %{ 8511 match(Set dst (MulL (LoadL src) imm)); 8512 effect(KILL cr); 8513 8514 ins_cost(300); 8515 format %{ "imulq $dst, $src, $imm\t# long" %} 8516 ins_encode %{ 8517 __ imulq($dst$$Register, $src$$Address, $imm$$constant); 8518 %} 8519 ins_pipe(ialu_reg_mem_alu0); 8520 %} 8521 8522 instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr) 8523 %{ 8524 match(Set dst (MulHiL src rax)); 8525 effect(USE_KILL rax, KILL cr); 8526 8527 ins_cost(300); 8528 format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %} 8529 ins_encode %{ 8530 __ imulq($src$$Register); 8531 %} 8532 ins_pipe(ialu_reg_reg_alu0); 8533 %} 8534 8535 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div, 8536 rFlagsReg cr) 8537 %{ 8538 match(Set rax (DivI rax div)); 8539 effect(KILL rdx, KILL cr); 8540 8541 ins_cost(30*100+10*100); // XXX 8542 format %{ "cmpl rax, 0x80000000\t# idiv\n\t" 8543 "jne,s normal\n\t" 8544 "xorl rdx, rdx\n\t" 8545 "cmpl $div, -1\n\t" 8546 "je,s done\n" 8547 "normal: cdql\n\t" 8548 "idivl $div\n" 8549 "done:" %} 8550 ins_encode(cdql_enc(div)); 8551 ins_pipe(ialu_reg_reg_alu0); 8552 %} 8553 8554 instruct divL_rReg(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div, 8555 rFlagsReg cr) 8556 %{ 8557 match(Set rax (DivL rax div)); 8558 effect(KILL rdx, KILL cr); 8559 8560 ins_cost(30*100+10*100); // XXX 8561 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t" 8562 "cmpq rax, rdx\n\t" 8563 "jne,s normal\n\t" 8564 "xorl rdx, rdx\n\t" 8565 "cmpq $div, -1\n\t" 8566 "je,s done\n" 8567 "normal: cdqq\n\t" 8568 "idivq $div\n" 8569 "done:" %} 8570 ins_encode(cdqq_enc(div)); 8571 ins_pipe(ialu_reg_reg_alu0); 8572 %} 8573 8574 // Integer DIVMOD with Register, both quotient and mod results 8575 instruct divModI_rReg_divmod(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div, 8576 rFlagsReg cr) 8577 %{ 8578 match(DivModI rax div); 8579 effect(KILL cr); 8580 8581 ins_cost(30*100+10*100); // XXX 8582 format %{ "cmpl rax, 0x80000000\t# idiv\n\t" 8583 "jne,s normal\n\t" 8584 "xorl rdx, rdx\n\t" 8585 "cmpl $div, -1\n\t" 8586 "je,s done\n" 8587 "normal: cdql\n\t" 8588 "idivl $div\n" 8589 "done:" %} 8590 ins_encode(cdql_enc(div)); 8591 ins_pipe(pipe_slow); 8592 %} 8593 8594 // Long DIVMOD with Register, both quotient and mod 
results 8595 instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div, 8596 rFlagsReg cr) 8597 %{ 8598 match(DivModL rax div); 8599 effect(KILL cr); 8600 8601 ins_cost(30*100+10*100); // XXX 8602 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t" 8603 "cmpq rax, rdx\n\t" 8604 "jne,s normal\n\t" 8605 "xorl rdx, rdx\n\t" 8606 "cmpq $div, -1\n\t" 8607 "je,s done\n" 8608 "normal: cdqq\n\t" 8609 "idivq $div\n" 8610 "done:" %} 8611 ins_encode(cdqq_enc(div)); 8612 ins_pipe(pipe_slow); 8613 %} 8614 8615 //----------- DivL-By-Constant-Expansions-------------------------------------- 8616 // DivI cases are handled by the compiler 8617 8618 // Magic constant, reciprocal of 10 8619 instruct loadConL_0x6666666666666667(rRegL dst) 8620 %{ 8621 effect(DEF dst); 8622 8623 format %{ "movq $dst, #0x666666666666667\t# Used in div-by-10" %} 8624 ins_encode(load_immL(dst, 0x6666666666666667)); 8625 ins_pipe(ialu_reg); 8626 %} 8627 8628 instruct mul_hi(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr) 8629 %{ 8630 effect(DEF dst, USE src, USE_KILL rax, KILL cr); 8631 8632 format %{ "imulq rdx:rax, rax, $src\t# Used in div-by-10" %} 8633 ins_encode %{ 8634 __ imulq($src$$Register); 8635 %} 8636 ins_pipe(ialu_reg_reg_alu0); 8637 %} 8638 8639 instruct sarL_rReg_63(rRegL dst, rFlagsReg cr) 8640 %{ 8641 effect(USE_DEF dst, KILL cr); 8642 8643 format %{ "sarq $dst, #63\t# Used in div-by-10" %} 8644 ins_encode %{ 8645 __ sarq($dst$$Register, 63); 8646 %} 8647 ins_pipe(ialu_reg); 8648 %} 8649 8650 instruct sarL_rReg_2(rRegL dst, rFlagsReg cr) 8651 %{ 8652 effect(USE_DEF dst, KILL cr); 8653 8654 format %{ "sarq $dst, #2\t# Used in div-by-10" %} 8655 ins_encode %{ 8656 __ sarq($dst$$Register, 2); 8657 %} 8658 ins_pipe(ialu_reg); 8659 %} 8660 8661 instruct divL_10(rdx_RegL dst, no_rax_RegL src, immL10 div) 8662 %{ 8663 match(Set dst (DivL src div)); 8664 8665 ins_cost((5+8)*100); 8666 expand %{ 8667 rax_RegL rax; // Killed temp 8668 rFlagsReg cr; // Killed 8669 loadConL_0x6666666666666667(rax); // movq rax, 0x6666666666666667 8670 mul_hi(dst, src, rax, cr); // mulq rdx:rax <= rax * $src 8671 sarL_rReg_63(src, cr); // sarq src, 63 8672 sarL_rReg_2(dst, cr); // sarq rdx, 2 8673 subL_rReg(dst, src, cr); // subl rdx, src 8674 %} 8675 %} 8676 8677 //----------------------------------------------------------------------------- 8678 8679 instruct modI_rReg(rdx_RegI rdx, rax_RegI rax, no_rax_rdx_RegI div, 8680 rFlagsReg cr) 8681 %{ 8682 match(Set rdx (ModI rax div)); 8683 effect(KILL rax, KILL cr); 8684 8685 ins_cost(300); // XXX 8686 format %{ "cmpl rax, 0x80000000\t# irem\n\t" 8687 "jne,s normal\n\t" 8688 "xorl rdx, rdx\n\t" 8689 "cmpl $div, -1\n\t" 8690 "je,s done\n" 8691 "normal: cdql\n\t" 8692 "idivl $div\n" 8693 "done:" %} 8694 ins_encode(cdql_enc(div)); 8695 ins_pipe(ialu_reg_reg_alu0); 8696 %} 8697 8698 instruct modL_rReg(rdx_RegL rdx, rax_RegL rax, no_rax_rdx_RegL div, 8699 rFlagsReg cr) 8700 %{ 8701 match(Set rdx (ModL rax div)); 8702 effect(KILL rax, KILL cr); 8703 8704 ins_cost(300); // XXX 8705 format %{ "movq rdx, 0x8000000000000000\t# lrem\n\t" 8706 "cmpq rax, rdx\n\t" 8707 "jne,s normal\n\t" 8708 "xorl rdx, rdx\n\t" 8709 "cmpq $div, -1\n\t" 8710 "je,s done\n" 8711 "normal: cdqq\n\t" 8712 "idivq $div\n" 8713 "done:" %} 8714 ins_encode(cdqq_enc(div)); 8715 ins_pipe(ialu_reg_reg_alu0); 8716 %} 8717 8718 // Integer Shift Instructions 8719 // Shift Left by one 8720 instruct salI_rReg_1(rRegI dst, immI_1 shift, rFlagsReg cr) 8721 %{ 8722 match(Set dst (LShiftI dst shift)); 8723 effect(KILL 
cr); 8724 8725 format %{ "sall $dst, $shift" %} 8726 ins_encode %{ 8727 __ sall($dst$$Register, $shift$$constant); 8728 %} 8729 ins_pipe(ialu_reg); 8730 %} 8731 8732 // Shift Left by one 8733 instruct salI_mem_1(memory dst, immI_1 shift, rFlagsReg cr) 8734 %{ 8735 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift))); 8736 effect(KILL cr); 8737 8738 format %{ "sall $dst, $shift\t" %} 8739 ins_encode %{ 8740 __ sall($dst$$Address, $shift$$constant); 8741 %} 8742 ins_pipe(ialu_mem_imm); 8743 %} 8744 8745 // Shift Left by 8-bit immediate 8746 instruct salI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr) 8747 %{ 8748 match(Set dst (LShiftI dst shift)); 8749 effect(KILL cr); 8750 8751 format %{ "sall $dst, $shift" %} 8752 ins_encode %{ 8753 __ sall($dst$$Register, $shift$$constant); 8754 %} 8755 ins_pipe(ialu_reg); 8756 %} 8757 8758 // Shift Left by 8-bit immediate 8759 instruct salI_mem_imm(memory dst, immI8 shift, rFlagsReg cr) 8760 %{ 8761 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift))); 8762 effect(KILL cr); 8763 8764 format %{ "sall $dst, $shift" %} 8765 ins_encode %{ 8766 __ sall($dst$$Address, $shift$$constant); 8767 %} 8768 ins_pipe(ialu_mem_imm); 8769 %} 8770 8771 // Shift Left by variable 8772 instruct salI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr) 8773 %{ 8774 match(Set dst (LShiftI dst shift)); 8775 effect(KILL cr); 8776 8777 format %{ "sall $dst, $shift" %} 8778 ins_encode %{ 8779 __ sall($dst$$Register); 8780 %} 8781 ins_pipe(ialu_reg_reg); 8782 %} 8783 8784 // Shift Left by variable 8785 instruct salI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr) 8786 %{ 8787 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift))); 8788 effect(KILL cr); 8789 8790 format %{ "sall $dst, $shift" %} 8791 ins_encode %{ 8792 __ sall($dst$$Address); 8793 %} 8794 ins_pipe(ialu_mem_reg); 8795 %} 8796 8797 // Arithmetic shift right by one 8798 instruct sarI_rReg_1(rRegI dst, immI_1 shift, rFlagsReg cr) 8799 %{ 8800 match(Set dst (RShiftI dst shift)); 8801 effect(KILL cr); 8802 8803 format %{ "sarl $dst, $shift" %} 8804 ins_encode %{ 8805 __ sarl($dst$$Register, $shift$$constant); 8806 %} 8807 ins_pipe(ialu_reg); 8808 %} 8809 8810 // Arithmetic shift right by one 8811 instruct sarI_mem_1(memory dst, immI_1 shift, rFlagsReg cr) 8812 %{ 8813 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift))); 8814 effect(KILL cr); 8815 8816 format %{ "sarl $dst, $shift" %} 8817 ins_encode %{ 8818 __ sarl($dst$$Address, $shift$$constant); 8819 %} 8820 ins_pipe(ialu_mem_imm); 8821 %} 8822 8823 // Arithmetic Shift Right by 8-bit immediate 8824 instruct sarI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr) 8825 %{ 8826 match(Set dst (RShiftI dst shift)); 8827 effect(KILL cr); 8828 8829 format %{ "sarl $dst, $shift" %} 8830 ins_encode %{ 8831 __ sarl($dst$$Register, $shift$$constant); 8832 %} 8833 ins_pipe(ialu_mem_imm); 8834 %} 8835 8836 // Arithmetic Shift Right by 8-bit immediate 8837 instruct sarI_mem_imm(memory dst, immI8 shift, rFlagsReg cr) 8838 %{ 8839 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift))); 8840 effect(KILL cr); 8841 8842 format %{ "sarl $dst, $shift" %} 8843 ins_encode %{ 8844 __ sarl($dst$$Address, $shift$$constant); 8845 %} 8846 ins_pipe(ialu_mem_imm); 8847 %} 8848 8849 // Arithmetic Shift Right by variable 8850 instruct sarI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr) 8851 %{ 8852 match(Set dst (RShiftI dst shift)); 8853 effect(KILL cr); 8854 format %{ "sarl $dst, $shift" %} 8855 ins_encode %{ 8856 __ sarl($dst$$Register); 8857 %} 8858 ins_pipe(ialu_reg_reg); 8859 %} 8860 
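// Note on the *_CL forms above and below: x86 variable shifts always take
// their count from CL, which is why these rules pin the shift operand to
// rcx_RegI and why their encode blocks call the one-argument assembler
// overloads (e.g. "__ sall(reg)" shifts reg by CL). Rough sketch of what
// ends up matched here:
//
//   int shr(int x, int n) { return x >> n; }    // RShiftI x n
//   // n is forced into RCX by the operand's register mask, then:
//   //   sarl x_reg     (count implicitly in CL)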
8861 // Arithmetic Shift Right by variable 8862 instruct sarI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr) 8863 %{ 8864 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift))); 8865 effect(KILL cr); 8866 8867 format %{ "sarl $dst, $shift" %} 8868 ins_encode %{ 8869 __ sarl($dst$$Address); 8870 %} 8871 ins_pipe(ialu_mem_reg); 8872 %} 8873 8874 // Logical shift right by one 8875 instruct shrI_rReg_1(rRegI dst, immI_1 shift, rFlagsReg cr) 8876 %{ 8877 match(Set dst (URShiftI dst shift)); 8878 effect(KILL cr); 8879 8880 format %{ "shrl $dst, $shift" %} 8881 ins_encode %{ 8882 __ shrl($dst$$Register, $shift$$constant); 8883 %} 8884 ins_pipe(ialu_reg); 8885 %} 8886 8887 // Logical shift right by one 8888 instruct shrI_mem_1(memory dst, immI_1 shift, rFlagsReg cr) 8889 %{ 8890 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift))); 8891 effect(KILL cr); 8892 8893 format %{ "shrl $dst, $shift" %} 8894 ins_encode %{ 8895 __ shrl($dst$$Address, $shift$$constant); 8896 %} 8897 ins_pipe(ialu_mem_imm); 8898 %} 8899 8900 // Logical Shift Right by 8-bit immediate 8901 instruct shrI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr) 8902 %{ 8903 match(Set dst (URShiftI dst shift)); 8904 effect(KILL cr); 8905 8906 format %{ "shrl $dst, $shift" %} 8907 ins_encode %{ 8908 __ shrl($dst$$Register, $shift$$constant); 8909 %} 8910 ins_pipe(ialu_reg); 8911 %} 8912 8913 // Logical Shift Right by 8-bit immediate 8914 instruct shrI_mem_imm(memory dst, immI8 shift, rFlagsReg cr) 8915 %{ 8916 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift))); 8917 effect(KILL cr); 8918 8919 format %{ "shrl $dst, $shift" %} 8920 ins_encode %{ 8921 __ shrl($dst$$Address, $shift$$constant); 8922 %} 8923 ins_pipe(ialu_mem_imm); 8924 %} 8925 8926 // Logical Shift Right by variable 8927 instruct shrI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr) 8928 %{ 8929 match(Set dst (URShiftI dst shift)); 8930 effect(KILL cr); 8931 8932 format %{ "shrl $dst, $shift" %} 8933 ins_encode %{ 8934 __ shrl($dst$$Register); 8935 %} 8936 ins_pipe(ialu_reg_reg); 8937 %} 8938 8939 // Logical Shift Right by variable 8940 instruct shrI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr) 8941 %{ 8942 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift))); 8943 effect(KILL cr); 8944 8945 format %{ "shrl $dst, $shift" %} 8946 ins_encode %{ 8947 __ shrl($dst$$Address); 8948 %} 8949 ins_pipe(ialu_mem_reg); 8950 %} 8951 8952 // Long Shift Instructions 8953 // Shift Left by one 8954 instruct salL_rReg_1(rRegL dst, immI_1 shift, rFlagsReg cr) 8955 %{ 8956 match(Set dst (LShiftL dst shift)); 8957 effect(KILL cr); 8958 8959 format %{ "salq $dst, $shift" %} 8960 ins_encode %{ 8961 __ salq($dst$$Register, $shift$$constant); 8962 %} 8963 ins_pipe(ialu_reg); 8964 %} 8965 8966 // Shift Left by one 8967 instruct salL_mem_1(memory dst, immI_1 shift, rFlagsReg cr) 8968 %{ 8969 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift))); 8970 effect(KILL cr); 8971 8972 format %{ "salq $dst, $shift" %} 8973 ins_encode %{ 8974 __ salq($dst$$Address, $shift$$constant); 8975 %} 8976 ins_pipe(ialu_mem_imm); 8977 %} 8978 8979 // Shift Left by 8-bit immediate 8980 instruct salL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr) 8981 %{ 8982 match(Set dst (LShiftL dst shift)); 8983 effect(KILL cr); 8984 8985 format %{ "salq $dst, $shift" %} 8986 ins_encode %{ 8987 __ salq($dst$$Register, $shift$$constant); 8988 %} 8989 ins_pipe(ialu_reg); 8990 %} 8991 8992 // Shift Left by 8-bit immediate 8993 instruct salL_mem_imm(memory dst, immI8 shift, rFlagsReg cr) 8994 %{ 8995 match(Set dst 
(StoreL dst (LShiftL (LoadL dst) shift))); 8996 effect(KILL cr); 8997 8998 format %{ "salq $dst, $shift" %} 8999 ins_encode %{ 9000 __ salq($dst$$Address, $shift$$constant); 9001 %} 9002 ins_pipe(ialu_mem_imm); 9003 %} 9004 9005 // Shift Left by variable 9006 instruct salL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr) 9007 %{ 9008 match(Set dst (LShiftL dst shift)); 9009 effect(KILL cr); 9010 9011 format %{ "salq $dst, $shift" %} 9012 ins_encode %{ 9013 __ salq($dst$$Register); 9014 %} 9015 ins_pipe(ialu_reg_reg); 9016 %} 9017 9018 // Shift Left by variable 9019 instruct salL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr) 9020 %{ 9021 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift))); 9022 effect(KILL cr); 9023 9024 format %{ "salq $dst, $shift" %} 9025 ins_encode %{ 9026 __ salq($dst$$Address); 9027 %} 9028 ins_pipe(ialu_mem_reg); 9029 %} 9030 9031 // Arithmetic shift right by one 9032 instruct sarL_rReg_1(rRegL dst, immI_1 shift, rFlagsReg cr) 9033 %{ 9034 match(Set dst (RShiftL dst shift)); 9035 effect(KILL cr); 9036 9037 format %{ "sarq $dst, $shift" %} 9038 ins_encode %{ 9039 __ sarq($dst$$Register, $shift$$constant); 9040 %} 9041 ins_pipe(ialu_reg); 9042 %} 9043 9044 // Arithmetic shift right by one 9045 instruct sarL_mem_1(memory dst, immI_1 shift, rFlagsReg cr) 9046 %{ 9047 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift))); 9048 effect(KILL cr); 9049 9050 format %{ "sarq $dst, $shift" %} 9051 ins_encode %{ 9052 __ sarq($dst$$Address, $shift$$constant); 9053 %} 9054 ins_pipe(ialu_mem_imm); 9055 %} 9056 9057 // Arithmetic Shift Right by 8-bit immediate 9058 instruct sarL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr) 9059 %{ 9060 match(Set dst (RShiftL dst shift)); 9061 effect(KILL cr); 9062 9063 format %{ "sarq $dst, $shift" %} 9064 ins_encode %{ 9065 __ sarq($dst$$Register, (unsigned char)($shift$$constant & 0x3F)); 9066 %} 9067 ins_pipe(ialu_mem_imm); 9068 %} 9069 9070 // Arithmetic Shift Right by 8-bit immediate 9071 instruct sarL_mem_imm(memory dst, immI8 shift, rFlagsReg cr) 9072 %{ 9073 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift))); 9074 effect(KILL cr); 9075 9076 format %{ "sarq $dst, $shift" %} 9077 ins_encode %{ 9078 __ sarq($dst$$Address, (unsigned char)($shift$$constant & 0x3F)); 9079 %} 9080 ins_pipe(ialu_mem_imm); 9081 %} 9082 9083 // Arithmetic Shift Right by variable 9084 instruct sarL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr) 9085 %{ 9086 match(Set dst (RShiftL dst shift)); 9087 effect(KILL cr); 9088 9089 format %{ "sarq $dst, $shift" %} 9090 ins_encode %{ 9091 __ sarq($dst$$Register); 9092 %} 9093 ins_pipe(ialu_reg_reg); 9094 %} 9095 9096 // Arithmetic Shift Right by variable 9097 instruct sarL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr) 9098 %{ 9099 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift))); 9100 effect(KILL cr); 9101 9102 format %{ "sarq $dst, $shift" %} 9103 ins_encode %{ 9104 __ sarq($dst$$Address); 9105 %} 9106 ins_pipe(ialu_mem_reg); 9107 %} 9108 9109 // Logical shift right by one 9110 instruct shrL_rReg_1(rRegL dst, immI_1 shift, rFlagsReg cr) 9111 %{ 9112 match(Set dst (URShiftL dst shift)); 9113 effect(KILL cr); 9114 9115 format %{ "shrq $dst, $shift" %} 9116 ins_encode %{ 9117 __ shrq($dst$$Register, $shift$$constant); 9118 %} 9119 ins_pipe(ialu_reg); 9120 %} 9121 9122 // Logical shift right by one 9123 instruct shrL_mem_1(memory dst, immI_1 shift, rFlagsReg cr) 9124 %{ 9125 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift))); 9126 effect(KILL cr); 9127 9128 format %{ "shrq $dst, $shift" %} 9129 
ins_encode %{ 9130 __ shrq($dst$$Address, $shift$$constant); 9131 %} 9132 ins_pipe(ialu_mem_imm); 9133 %} 9134 9135 // Logical Shift Right by 8-bit immediate 9136 instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr) 9137 %{ 9138 match(Set dst (URShiftL dst shift)); 9139 effect(KILL cr); 9140 9141 format %{ "shrq $dst, $shift" %} 9142 ins_encode %{ 9143 __ shrq($dst$$Register, $shift$$constant); 9144 %} 9145 ins_pipe(ialu_reg); 9146 %} 9147 9148 // Logical Shift Right by 8-bit immediate 9149 instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr) 9150 %{ 9151 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift))); 9152 effect(KILL cr); 9153 9154 format %{ "shrq $dst, $shift" %} 9155 ins_encode %{ 9156 __ shrq($dst$$Address, $shift$$constant); 9157 %} 9158 ins_pipe(ialu_mem_imm); 9159 %} 9160 9161 // Logical Shift Right by variable 9162 instruct shrL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr) 9163 %{ 9164 match(Set dst (URShiftL dst shift)); 9165 effect(KILL cr); 9166 9167 format %{ "shrq $dst, $shift" %} 9168 ins_encode %{ 9169 __ shrq($dst$$Register); 9170 %} 9171 ins_pipe(ialu_reg_reg); 9172 %} 9173 9174 // Logical Shift Right by variable 9175 instruct shrL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr) 9176 %{ 9177 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift))); 9178 effect(KILL cr); 9179 9180 format %{ "shrq $dst, $shift" %} 9181 ins_encode %{ 9182 __ shrq($dst$$Address); 9183 %} 9184 ins_pipe(ialu_mem_reg); 9185 %} 9186 9187 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24. 9188 // This idiom is used by the compiler for the i2b bytecode. 9189 instruct i2b(rRegI dst, rRegI src, immI_24 twentyfour) 9190 %{ 9191 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour)); 9192 9193 format %{ "movsbl $dst, $src\t# i2b" %} 9194 ins_encode %{ 9195 __ movsbl($dst$$Register, $src$$Register); 9196 %} 9197 ins_pipe(ialu_reg_reg); 9198 %} 9199 9200 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16. 9201 // This idiom is used by the compiler the i2s bytecode. 9202 instruct i2s(rRegI dst, rRegI src, immI_16 sixteen) 9203 %{ 9204 match(Set dst (RShiftI (LShiftI src sixteen) sixteen)); 9205 9206 format %{ "movswl $dst, $src\t# i2s" %} 9207 ins_encode %{ 9208 __ movswl($dst$$Register, $src$$Register); 9209 %} 9210 ins_pipe(ialu_reg_reg); 9211 %} 9212 9213 // ROL/ROR instructions 9214 9215 // Rotate left by constant. 9216 instruct rolI_imm(rRegI dst, immI8 shift, rFlagsReg cr) 9217 %{ 9218 predicate(n->bottom_type()->basic_type() == T_INT); 9219 match(Set dst (RotateLeft dst shift)); 9220 effect(KILL cr); 9221 format %{ "roll $dst, $shift" %} 9222 ins_encode %{ 9223 __ roll($dst$$Register, $shift$$constant); 9224 %} 9225 ins_pipe(ialu_reg); 9226 %} 9227 9228 // Rotate Left by variable 9229 instruct rolI_rReg_Var(rRegI dst, rcx_RegI shift, rFlagsReg cr) 9230 %{ 9231 predicate(n->bottom_type()->basic_type() == T_INT); 9232 match(Set dst (RotateLeft dst shift)); 9233 effect(KILL cr); 9234 format %{ "roll $dst, $shift" %} 9235 ins_encode %{ 9236 __ roll($dst$$Register); 9237 %} 9238 ins_pipe(ialu_reg_reg); 9239 %} 9240 9241 // Rotate Right by constant. 
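// The constant rotate comes in two flavors below: without BMI2 it is done in
// place with rorl, which writes RFLAGS (hence the KILL cr), while the BMI2
// form uses the non-destructive rorx encoding, which leaves the flags alone
// and therefore needs no rFlagsReg operand. Semantically (rough sketch,
// 32-bit, count taken mod 32):
//
//   rotr32(x, s)  ==  (x >> s) | (x << (32 - s))      // x unsigned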
9242 instruct rorI_immI8_legacy(rRegI dst, immI8 shift, rFlagsReg cr) 9243 %{ 9244 predicate(!VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_INT); 9245 match(Set dst (RotateRight dst shift)); 9246 effect(KILL cr); 9247 format %{ "rorl $dst, $shift" %} 9248 ins_encode %{ 9249 __ rorl($dst$$Register, $shift$$constant); 9250 %} 9251 ins_pipe(ialu_reg); 9252 %} 9253 9254 // Rotate Right by constant. 9255 instruct rorI_immI8(rRegI dst, immI8 shift) 9256 %{ 9257 predicate(VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_INT); 9258 match(Set dst (RotateRight dst shift)); 9259 format %{ "rorxd $dst, $shift" %} 9260 ins_encode %{ 9261 __ rorxd($dst$$Register, $dst$$Register, $shift$$constant); 9262 %} 9263 ins_pipe(ialu_reg_reg); 9264 %} 9265 9266 // Rotate Right by variable 9267 instruct rorI_rReg_Var(rRegI dst, rcx_RegI shift, rFlagsReg cr) 9268 %{ 9269 predicate(n->bottom_type()->basic_type() == T_INT); 9270 match(Set dst (RotateRight dst shift)); 9271 effect(KILL cr); 9272 format %{ "rorl $dst, $shift" %} 9273 ins_encode %{ 9274 __ rorl($dst$$Register); 9275 %} 9276 ins_pipe(ialu_reg_reg); 9277 %} 9278 9279 9280 // Rotate Left by constant. 9281 instruct rolL_immI8(rRegL dst, immI8 shift, rFlagsReg cr) 9282 %{ 9283 predicate(n->bottom_type()->basic_type() == T_LONG); 9284 match(Set dst (RotateLeft dst shift)); 9285 effect(KILL cr); 9286 format %{ "rolq $dst, $shift" %} 9287 ins_encode %{ 9288 __ rolq($dst$$Register, $shift$$constant); 9289 %} 9290 ins_pipe(ialu_reg); 9291 %} 9292 9293 // Rotate Left by variable 9294 instruct rolL_rReg_Var(rRegL dst, rcx_RegI shift, rFlagsReg cr) 9295 %{ 9296 predicate(n->bottom_type()->basic_type() == T_LONG); 9297 match(Set dst (RotateLeft dst shift)); 9298 effect(KILL cr); 9299 format %{ "rolq $dst, $shift" %} 9300 ins_encode %{ 9301 __ rolq($dst$$Register); 9302 %} 9303 ins_pipe(ialu_reg_reg); 9304 %} 9305 9306 9307 // Rotate Right by constant. 
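// RotateRight (like RotateLeft above) is a single ideal node shared by int
// and long, so each rule's predicate routes on n->bottom_type()->basic_type().
// T_LONG lands on the 64-bit rules below: the legacy rorq form, which clobbers
// the flags, or the BMI2 rorxq form, which does not. Rough picture:
//
//   rotateRight(longX, s)  =>  RotateRight node with basic_type() == T_LONG
//                          =>  "rorq x_reg, s"   or   "rorxq x_reg, x_reg, s"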
9308 instruct rorL_immI8_legacy(rRegL dst, immI8 shift, rFlagsReg cr) 9309 %{ 9310 predicate(!VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_LONG); 9311 match(Set dst (RotateRight dst shift)); 9312 effect(KILL cr); 9313 format %{ "rorq $dst, $shift" %} 9314 ins_encode %{ 9315 __ rorq($dst$$Register, $shift$$constant); 9316 %} 9317 ins_pipe(ialu_reg); 9318 %} 9319 9320 9321 // Rotate Right by constant 9322 instruct rorL_immI8(rRegL dst, immI8 shift) 9323 %{ 9324 predicate(VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_LONG); 9325 match(Set dst (RotateRight dst shift)); 9326 format %{ "rorxq $dst, $shift" %} 9327 ins_encode %{ 9328 __ rorxq($dst$$Register, $dst$$Register, $shift$$constant); 9329 %} 9330 ins_pipe(ialu_reg_reg); 9331 %} 9332 9333 // Rotate Right by variable 9334 instruct rorL_rReg_Var(rRegL dst, rcx_RegI shift, rFlagsReg cr) 9335 %{ 9336 predicate(n->bottom_type()->basic_type() == T_LONG); 9337 match(Set dst (RotateRight dst shift)); 9338 effect(KILL cr); 9339 format %{ "rorq $dst, $shift" %} 9340 ins_encode %{ 9341 __ rorq($dst$$Register); 9342 %} 9343 ins_pipe(ialu_reg_reg); 9344 %} 9345 9346 9347 // Logical Instructions 9348 9349 // Integer Logical Instructions 9350 9351 // And Instructions 9352 // And Register with Register 9353 instruct andI_rReg(rRegI dst, rRegI src, rFlagsReg cr) 9354 %{ 9355 match(Set dst (AndI dst src)); 9356 effect(KILL cr); 9357 9358 format %{ "andl $dst, $src\t# int" %} 9359 ins_encode %{ 9360 __ andl($dst$$Register, $src$$Register); 9361 %} 9362 ins_pipe(ialu_reg_reg); 9363 %} 9364 9365 // And Register with Immediate 255 9366 instruct andI_rReg_imm255(rRegI dst, immI_255 src) 9367 %{ 9368 match(Set dst (AndI dst src)); 9369 9370 format %{ "movzbl $dst, $dst\t# int & 0xFF" %} 9371 ins_encode %{ 9372 __ movzbl($dst$$Register, $dst$$Register); 9373 %} 9374 ins_pipe(ialu_reg); 9375 %} 9376 9377 // And Register with Immediate 255 and promote to long 9378 instruct andI2L_rReg_imm255(rRegL dst, rRegI src, immI_255 mask) 9379 %{ 9380 match(Set dst (ConvI2L (AndI src mask))); 9381 9382 format %{ "movzbl $dst, $src\t# int & 0xFF -> long" %} 9383 ins_encode %{ 9384 __ movzbl($dst$$Register, $src$$Register); 9385 %} 9386 ins_pipe(ialu_reg); 9387 %} 9388 9389 // And Register with Immediate 65535 9390 instruct andI_rReg_imm65535(rRegI dst, immI_65535 src) 9391 %{ 9392 match(Set dst (AndI dst src)); 9393 9394 format %{ "movzwl $dst, $dst\t# int & 0xFFFF" %} 9395 ins_encode %{ 9396 __ movzwl($dst$$Register, $dst$$Register); 9397 %} 9398 ins_pipe(ialu_reg); 9399 %} 9400 9401 // And Register with Immediate 65535 and promote to long 9402 instruct andI2L_rReg_imm65535(rRegL dst, rRegI src, immI_65535 mask) 9403 %{ 9404 match(Set dst (ConvI2L (AndI src mask))); 9405 9406 format %{ "movzwl $dst, $src\t# int & 0xFFFF -> long" %} 9407 ins_encode %{ 9408 __ movzwl($dst$$Register, $src$$Register); 9409 %} 9410 ins_pipe(ialu_reg); 9411 %} 9412 9413 // Can skip int2long conversions after AND with small bitmask 9414 instruct convI2LAndI_reg_immIbitmask(rRegL dst, rRegI src, immI_Pow2M1 mask, rRegI tmp, rFlagsReg cr) 9415 %{ 9416 predicate(VM_Version::supports_bmi2()); 9417 ins_cost(125); 9418 effect(TEMP tmp, KILL cr); 9419 match(Set dst (ConvI2L (AndI src mask))); 9420 format %{ "bzhiq $dst, $src, $mask \t# using $tmp as TEMP, int & immI_Pow2M1 -> long" %} 9421 ins_encode %{ 9422 __ movl($tmp$$Register, exact_log2($mask$$constant + 1)); 9423 __ bzhiq($dst$$Register, $src$$Register, $tmp$$Register); 9424 %} 9425 ins_pipe(ialu_reg_reg); 
9426 %} 9427 9428 // And Register with Immediate 9429 instruct andI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) 9430 %{ 9431 match(Set dst (AndI dst src)); 9432 effect(KILL cr); 9433 9434 format %{ "andl $dst, $src\t# int" %} 9435 ins_encode %{ 9436 __ andl($dst$$Register, $src$$constant); 9437 %} 9438 ins_pipe(ialu_reg); 9439 %} 9440 9441 // And Register with Memory 9442 instruct andI_rReg_mem(rRegI dst, memory src, rFlagsReg cr) 9443 %{ 9444 match(Set dst (AndI dst (LoadI src))); 9445 effect(KILL cr); 9446 9447 ins_cost(125); 9448 format %{ "andl $dst, $src\t# int" %} 9449 ins_encode %{ 9450 __ andl($dst$$Register, $src$$Address); 9451 %} 9452 ins_pipe(ialu_reg_mem); 9453 %} 9454 9455 // And Memory with Register 9456 instruct andB_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 9457 %{ 9458 match(Set dst (StoreB dst (AndI (LoadB dst) src))); 9459 effect(KILL cr); 9460 9461 ins_cost(150); 9462 format %{ "andb $dst, $src\t# byte" %} 9463 ins_encode %{ 9464 __ andb($dst$$Address, $src$$Register); 9465 %} 9466 ins_pipe(ialu_mem_reg); 9467 %} 9468 9469 instruct andI_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 9470 %{ 9471 match(Set dst (StoreI dst (AndI (LoadI dst) src))); 9472 effect(KILL cr); 9473 9474 ins_cost(150); 9475 format %{ "andl $dst, $src\t# int" %} 9476 ins_encode %{ 9477 __ andl($dst$$Address, $src$$Register); 9478 %} 9479 ins_pipe(ialu_mem_reg); 9480 %} 9481 9482 // And Memory with Immediate 9483 instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr) 9484 %{ 9485 match(Set dst (StoreI dst (AndI (LoadI dst) src))); 9486 effect(KILL cr); 9487 9488 ins_cost(125); 9489 format %{ "andl $dst, $src\t# int" %} 9490 ins_encode %{ 9491 __ andl($dst$$Address, $src$$constant); 9492 %} 9493 ins_pipe(ialu_mem_imm); 9494 %} 9495 9496 // BMI1 instructions 9497 instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, rFlagsReg cr) %{ 9498 match(Set dst (AndI (XorI src1 minus_1) (LoadI src2))); 9499 predicate(UseBMI1Instructions); 9500 effect(KILL cr); 9501 9502 ins_cost(125); 9503 format %{ "andnl $dst, $src1, $src2" %} 9504 9505 ins_encode %{ 9506 __ andnl($dst$$Register, $src1$$Register, $src2$$Address); 9507 %} 9508 ins_pipe(ialu_reg_mem); 9509 %} 9510 9511 instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, rFlagsReg cr) %{ 9512 match(Set dst (AndI (XorI src1 minus_1) src2)); 9513 predicate(UseBMI1Instructions); 9514 effect(KILL cr); 9515 9516 format %{ "andnl $dst, $src1, $src2" %} 9517 9518 ins_encode %{ 9519 __ andnl($dst$$Register, $src1$$Register, $src2$$Register); 9520 %} 9521 ins_pipe(ialu_reg); 9522 %} 9523 9524 instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI_0 imm_zero, rFlagsReg cr) %{ 9525 match(Set dst (AndI (SubI imm_zero src) src)); 9526 predicate(UseBMI1Instructions); 9527 effect(KILL cr); 9528 9529 format %{ "blsil $dst, $src" %} 9530 9531 ins_encode %{ 9532 __ blsil($dst$$Register, $src$$Register); 9533 %} 9534 ins_pipe(ialu_reg); 9535 %} 9536 9537 instruct blsiI_rReg_mem(rRegI dst, memory src, immI_0 imm_zero, rFlagsReg cr) %{ 9538 match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) )); 9539 predicate(UseBMI1Instructions); 9540 effect(KILL cr); 9541 9542 ins_cost(125); 9543 format %{ "blsil $dst, $src" %} 9544 9545 ins_encode %{ 9546 __ blsil($dst$$Register, $src$$Address); 9547 %} 9548 ins_pipe(ialu_reg_mem); 9549 %} 9550 9551 instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr) 9552 %{ 9553 match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ) ); 9554 
predicate(UseBMI1Instructions); 9555 effect(KILL cr); 9556 9557 ins_cost(125); 9558 format %{ "blsmskl $dst, $src" %} 9559 9560 ins_encode %{ 9561 __ blsmskl($dst$$Register, $src$$Address); 9562 %} 9563 ins_pipe(ialu_reg_mem); 9564 %} 9565 9566 instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr) 9567 %{ 9568 match(Set dst (XorI (AddI src minus_1) src)); 9569 predicate(UseBMI1Instructions); 9570 effect(KILL cr); 9571 9572 format %{ "blsmskl $dst, $src" %} 9573 9574 ins_encode %{ 9575 __ blsmskl($dst$$Register, $src$$Register); 9576 %} 9577 9578 ins_pipe(ialu_reg); 9579 %} 9580 9581 instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr) 9582 %{ 9583 match(Set dst (AndI (AddI src minus_1) src) ); 9584 predicate(UseBMI1Instructions); 9585 effect(KILL cr); 9586 9587 format %{ "blsrl $dst, $src" %} 9588 9589 ins_encode %{ 9590 __ blsrl($dst$$Register, $src$$Register); 9591 %} 9592 9593 ins_pipe(ialu_reg_mem); 9594 %} 9595 9596 instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr) 9597 %{ 9598 match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ) ); 9599 predicate(UseBMI1Instructions); 9600 effect(KILL cr); 9601 9602 ins_cost(125); 9603 format %{ "blsrl $dst, $src" %} 9604 9605 ins_encode %{ 9606 __ blsrl($dst$$Register, $src$$Address); 9607 %} 9608 9609 ins_pipe(ialu_reg); 9610 %} 9611 9612 // Or Instructions 9613 // Or Register with Register 9614 instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr) 9615 %{ 9616 match(Set dst (OrI dst src)); 9617 effect(KILL cr); 9618 9619 format %{ "orl $dst, $src\t# int" %} 9620 ins_encode %{ 9621 __ orl($dst$$Register, $src$$Register); 9622 %} 9623 ins_pipe(ialu_reg_reg); 9624 %} 9625 9626 // Or Register with Immediate 9627 instruct orI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) 9628 %{ 9629 match(Set dst (OrI dst src)); 9630 effect(KILL cr); 9631 9632 format %{ "orl $dst, $src\t# int" %} 9633 ins_encode %{ 9634 __ orl($dst$$Register, $src$$constant); 9635 %} 9636 ins_pipe(ialu_reg); 9637 %} 9638 9639 // Or Register with Memory 9640 instruct orI_rReg_mem(rRegI dst, memory src, rFlagsReg cr) 9641 %{ 9642 match(Set dst (OrI dst (LoadI src))); 9643 effect(KILL cr); 9644 9645 ins_cost(125); 9646 format %{ "orl $dst, $src\t# int" %} 9647 ins_encode %{ 9648 __ orl($dst$$Register, $src$$Address); 9649 %} 9650 ins_pipe(ialu_reg_mem); 9651 %} 9652 9653 // Or Memory with Register 9654 instruct orB_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 9655 %{ 9656 match(Set dst (StoreB dst (OrI (LoadB dst) src))); 9657 effect(KILL cr); 9658 9659 ins_cost(150); 9660 format %{ "orb $dst, $src\t# byte" %} 9661 ins_encode %{ 9662 __ orb($dst$$Address, $src$$Register); 9663 %} 9664 ins_pipe(ialu_mem_reg); 9665 %} 9666 9667 instruct orI_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 9668 %{ 9669 match(Set dst (StoreI dst (OrI (LoadI dst) src))); 9670 effect(KILL cr); 9671 9672 ins_cost(150); 9673 format %{ "orl $dst, $src\t# int" %} 9674 ins_encode %{ 9675 __ orl($dst$$Address, $src$$Register); 9676 %} 9677 ins_pipe(ialu_mem_reg); 9678 %} 9679 9680 // Or Memory with Immediate 9681 instruct orI_mem_imm(memory dst, immI src, rFlagsReg cr) 9682 %{ 9683 match(Set dst (StoreI dst (OrI (LoadI dst) src))); 9684 effect(KILL cr); 9685 9686 ins_cost(125); 9687 format %{ "orl $dst, $src\t# int" %} 9688 ins_encode %{ 9689 __ orl($dst$$Address, $src$$constant); 9690 %} 9691 ins_pipe(ialu_mem_imm); 9692 %} 9693 9694 // Xor Instructions 9695 // Xor Register with Register 9696 instruct xorI_rReg(rRegI dst, rRegI src, 
rFlagsReg cr) 9697 %{ 9698 match(Set dst (XorI dst src)); 9699 effect(KILL cr); 9700 9701 format %{ "xorl $dst, $src\t# int" %} 9702 ins_encode %{ 9703 __ xorl($dst$$Register, $src$$Register); 9704 %} 9705 ins_pipe(ialu_reg_reg); 9706 %} 9707 9708 // Xor Register with Immediate -1 9709 instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{ 9710 match(Set dst (XorI dst imm)); 9711 9712 format %{ "not $dst" %} 9713 ins_encode %{ 9714 __ notl($dst$$Register); 9715 %} 9716 ins_pipe(ialu_reg); 9717 %} 9718 9719 // Xor Register with Immediate 9720 instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) 9721 %{ 9722 match(Set dst (XorI dst src)); 9723 effect(KILL cr); 9724 9725 format %{ "xorl $dst, $src\t# int" %} 9726 ins_encode %{ 9727 __ xorl($dst$$Register, $src$$constant); 9728 %} 9729 ins_pipe(ialu_reg); 9730 %} 9731 9732 // Xor Register with Memory 9733 instruct xorI_rReg_mem(rRegI dst, memory src, rFlagsReg cr) 9734 %{ 9735 match(Set dst (XorI dst (LoadI src))); 9736 effect(KILL cr); 9737 9738 ins_cost(125); 9739 format %{ "xorl $dst, $src\t# int" %} 9740 ins_encode %{ 9741 __ xorl($dst$$Register, $src$$Address); 9742 %} 9743 ins_pipe(ialu_reg_mem); 9744 %} 9745 9746 // Xor Memory with Register 9747 instruct xorB_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 9748 %{ 9749 match(Set dst (StoreB dst (XorI (LoadB dst) src))); 9750 effect(KILL cr); 9751 9752 ins_cost(150); 9753 format %{ "xorb $dst, $src\t# byte" %} 9754 ins_encode %{ 9755 __ xorb($dst$$Address, $src$$Register); 9756 %} 9757 ins_pipe(ialu_mem_reg); 9758 %} 9759 9760 instruct xorI_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 9761 %{ 9762 match(Set dst (StoreI dst (XorI (LoadI dst) src))); 9763 effect(KILL cr); 9764 9765 ins_cost(150); 9766 format %{ "xorl $dst, $src\t# int" %} 9767 ins_encode %{ 9768 __ xorl($dst$$Address, $src$$Register); 9769 %} 9770 ins_pipe(ialu_mem_reg); 9771 %} 9772 9773 // Xor Memory with Immediate 9774 instruct xorI_mem_imm(memory dst, immI src, rFlagsReg cr) 9775 %{ 9776 match(Set dst (StoreI dst (XorI (LoadI dst) src))); 9777 effect(KILL cr); 9778 9779 ins_cost(125); 9780 format %{ "xorl $dst, $src\t# int" %} 9781 ins_encode %{ 9782 __ xorl($dst$$Address, $src$$constant); 9783 %} 9784 ins_pipe(ialu_mem_imm); 9785 %} 9786 9787 9788 // Long Logical Instructions 9789 9790 // And Instructions 9791 // And Register with Register 9792 instruct andL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 9793 %{ 9794 match(Set dst (AndL dst src)); 9795 effect(KILL cr); 9796 9797 format %{ "andq $dst, $src\t# long" %} 9798 ins_encode %{ 9799 __ andq($dst$$Register, $src$$Register); 9800 %} 9801 ins_pipe(ialu_reg_reg); 9802 %} 9803 9804 // And Register with Immediate 255 9805 instruct andL_rReg_imm255(rRegL dst, immL_255 src) 9806 %{ 9807 match(Set dst (AndL dst src)); 9808 9809 format %{ "movzbq $dst, $dst\t# long & 0xFF" %} 9810 ins_encode %{ 9811 __ movzbq($dst$$Register, $dst$$Register); 9812 %} 9813 ins_pipe(ialu_reg); 9814 %} 9815 9816 // And Register with Immediate 65535 9817 instruct andL_rReg_imm65535(rRegL dst, immL_65535 src) 9818 %{ 9819 match(Set dst (AndL dst src)); 9820 9821 format %{ "movzwq $dst, $dst\t# long & 0xFFFF" %} 9822 ins_encode %{ 9823 __ movzwq($dst$$Register, $dst$$Register); 9824 %} 9825 ins_pipe(ialu_reg); 9826 %} 9827 9828 // And Register with Immediate 9829 instruct andL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) 9830 %{ 9831 match(Set dst (AndL dst src)); 9832 effect(KILL cr); 9833 9834 format %{ "andq $dst, $src\t# long" %} 9835 ins_encode %{ 9836 __ andq($dst$$Register, $src$$constant); 
9837 %} 9838 ins_pipe(ialu_reg); 9839 %} 9840 9841 // And Register with Memory 9842 instruct andL_rReg_mem(rRegL dst, memory src, rFlagsReg cr) 9843 %{ 9844 match(Set dst (AndL dst (LoadL src))); 9845 effect(KILL cr); 9846 9847 ins_cost(125); 9848 format %{ "andq $dst, $src\t# long" %} 9849 ins_encode %{ 9850 __ andq($dst$$Register, $src$$Address); 9851 %} 9852 ins_pipe(ialu_reg_mem); 9853 %} 9854 9855 // And Memory with Register 9856 instruct andL_mem_rReg(memory dst, rRegL src, rFlagsReg cr) 9857 %{ 9858 match(Set dst (StoreL dst (AndL (LoadL dst) src))); 9859 effect(KILL cr); 9860 9861 ins_cost(150); 9862 format %{ "andq $dst, $src\t# long" %} 9863 ins_encode %{ 9864 __ andq($dst$$Address, $src$$Register); 9865 %} 9866 ins_pipe(ialu_mem_reg); 9867 %} 9868 9869 // And Memory with Immediate 9870 instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr) 9871 %{ 9872 match(Set dst (StoreL dst (AndL (LoadL dst) src))); 9873 effect(KILL cr); 9874 9875 ins_cost(125); 9876 format %{ "andq $dst, $src\t# long" %} 9877 ins_encode %{ 9878 __ andq($dst$$Address, $src$$constant); 9879 %} 9880 ins_pipe(ialu_mem_imm); 9881 %} 9882 9883 instruct btrL_mem_imm(memory dst, immL_NotPow2 con, rFlagsReg cr) 9884 %{ 9885 // con should be a pure 64-bit immediate given that not(con) is a power of 2 9886 // because AND/OR works well enough for 8/32-bit values. 9887 predicate(log2i_graceful(~n->in(3)->in(2)->get_long()) > 30); 9888 9889 match(Set dst (StoreL dst (AndL (LoadL dst) con))); 9890 effect(KILL cr); 9891 9892 ins_cost(125); 9893 format %{ "btrq $dst, log2(not($con))\t# long" %} 9894 ins_encode %{ 9895 __ btrq($dst$$Address, log2i_exact((julong)~$con$$constant)); 9896 %} 9897 ins_pipe(ialu_mem_imm); 9898 %} 9899 9900 // BMI1 instructions 9901 instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{ 9902 match(Set dst (AndL (XorL src1 minus_1) (LoadL src2))); 9903 predicate(UseBMI1Instructions); 9904 effect(KILL cr); 9905 9906 ins_cost(125); 9907 format %{ "andnq $dst, $src1, $src2" %} 9908 9909 ins_encode %{ 9910 __ andnq($dst$$Register, $src1$$Register, $src2$$Address); 9911 %} 9912 ins_pipe(ialu_reg_mem); 9913 %} 9914 9915 instruct andnL_rReg_rReg_rReg(rRegL dst, rRegL src1, rRegL src2, immL_M1 minus_1, rFlagsReg cr) %{ 9916 match(Set dst (AndL (XorL src1 minus_1) src2)); 9917 predicate(UseBMI1Instructions); 9918 effect(KILL cr); 9919 9920 format %{ "andnq $dst, $src1, $src2" %} 9921 9922 ins_encode %{ 9923 __ andnq($dst$$Register, $src1$$Register, $src2$$Register); 9924 %} 9925 ins_pipe(ialu_reg_mem); 9926 %} 9927 9928 instruct blsiL_rReg_rReg(rRegL dst, rRegL src, immL0 imm_zero, rFlagsReg cr) %{ 9929 match(Set dst (AndL (SubL imm_zero src) src)); 9930 predicate(UseBMI1Instructions); 9931 effect(KILL cr); 9932 9933 format %{ "blsiq $dst, $src" %} 9934 9935 ins_encode %{ 9936 __ blsiq($dst$$Register, $src$$Register); 9937 %} 9938 ins_pipe(ialu_reg); 9939 %} 9940 9941 instruct blsiL_rReg_mem(rRegL dst, memory src, immL0 imm_zero, rFlagsReg cr) %{ 9942 match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) )); 9943 predicate(UseBMI1Instructions); 9944 effect(KILL cr); 9945 9946 ins_cost(125); 9947 format %{ "blsiq $dst, $src" %} 9948 9949 ins_encode %{ 9950 __ blsiq($dst$$Register, $src$$Address); 9951 %} 9952 ins_pipe(ialu_reg_mem); 9953 %} 9954 9955 instruct blsmskL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr) 9956 %{ 9957 match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ) ); 9958 predicate(UseBMI1Instructions); 9959 
effect(KILL cr); 9960 9961 ins_cost(125); 9962 format %{ "blsmskq $dst, $src" %} 9963 9964 ins_encode %{ 9965 __ blsmskq($dst$$Register, $src$$Address); 9966 %} 9967 ins_pipe(ialu_reg_mem); 9968 %} 9969 9970 instruct blsmskL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr) 9971 %{ 9972 match(Set dst (XorL (AddL src minus_1) src)); 9973 predicate(UseBMI1Instructions); 9974 effect(KILL cr); 9975 9976 format %{ "blsmskq $dst, $src" %} 9977 9978 ins_encode %{ 9979 __ blsmskq($dst$$Register, $src$$Register); 9980 %} 9981 9982 ins_pipe(ialu_reg); 9983 %} 9984 9985 instruct blsrL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr) 9986 %{ 9987 match(Set dst (AndL (AddL src minus_1) src) ); 9988 predicate(UseBMI1Instructions); 9989 effect(KILL cr); 9990 9991 format %{ "blsrq $dst, $src" %} 9992 9993 ins_encode %{ 9994 __ blsrq($dst$$Register, $src$$Register); 9995 %} 9996 9997 ins_pipe(ialu_reg); 9998 %} 9999 10000 instruct blsrL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr) 10001 %{ 10002 match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src)) ); 10003 predicate(UseBMI1Instructions); 10004 effect(KILL cr); 10005 10006 ins_cost(125); 10007 format %{ "blsrq $dst, $src" %} 10008 10009 ins_encode %{ 10010 __ blsrq($dst$$Register, $src$$Address); 10011 %} 10012 10013 ins_pipe(ialu_reg); 10014 %} 10015 10016 // Or Instructions 10017 // Or Register with Register 10018 instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 10019 %{ 10020 match(Set dst (OrL dst src)); 10021 effect(KILL cr); 10022 10023 format %{ "orq $dst, $src\t# long" %} 10024 ins_encode %{ 10025 __ orq($dst$$Register, $src$$Register); 10026 %} 10027 ins_pipe(ialu_reg_reg); 10028 %} 10029 10030 // Use any_RegP to match R15 (TLS register) without spilling. 
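// CastP2X just reinterprets the pointer bits as a long, so matching it
// directly here folds the conversion into the OR; accepting any_RegP lets a
// pointer that lives in R15 (the thread register) feed the orq as-is instead
// of first being copied into an allocatable register. Rough shape of what
// this rule matches:
//
//   dst |= (intptr_t) ptr;    // OrL dst (CastP2X ptr)  =>  orq dst_reg, ptr_reg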
10031 instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{ 10032 match(Set dst (OrL dst (CastP2X src))); 10033 effect(KILL cr); 10034 10035 format %{ "orq $dst, $src\t# long" %} 10036 ins_encode %{ 10037 __ orq($dst$$Register, $src$$Register); 10038 %} 10039 ins_pipe(ialu_reg_reg); 10040 %} 10041 10042 10043 // Or Register with Immediate 10044 instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) 10045 %{ 10046 match(Set dst (OrL dst src)); 10047 effect(KILL cr); 10048 10049 format %{ "orq $dst, $src\t# long" %} 10050 ins_encode %{ 10051 __ orq($dst$$Register, $src$$constant); 10052 %} 10053 ins_pipe(ialu_reg); 10054 %} 10055 10056 // Or Register with Memory 10057 instruct orL_rReg_mem(rRegL dst, memory src, rFlagsReg cr) 10058 %{ 10059 match(Set dst (OrL dst (LoadL src))); 10060 effect(KILL cr); 10061 10062 ins_cost(125); 10063 format %{ "orq $dst, $src\t# long" %} 10064 ins_encode %{ 10065 __ orq($dst$$Register, $src$$Address); 10066 %} 10067 ins_pipe(ialu_reg_mem); 10068 %} 10069 10070 // Or Memory with Register 10071 instruct orL_mem_rReg(memory dst, rRegL src, rFlagsReg cr) 10072 %{ 10073 match(Set dst (StoreL dst (OrL (LoadL dst) src))); 10074 effect(KILL cr); 10075 10076 ins_cost(150); 10077 format %{ "orq $dst, $src\t# long" %} 10078 ins_encode %{ 10079 __ orq($dst$$Address, $src$$Register); 10080 %} 10081 ins_pipe(ialu_mem_reg); 10082 %} 10083 10084 // Or Memory with Immediate 10085 instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr) 10086 %{ 10087 match(Set dst (StoreL dst (OrL (LoadL dst) src))); 10088 effect(KILL cr); 10089 10090 ins_cost(125); 10091 format %{ "orq $dst, $src\t# long" %} 10092 ins_encode %{ 10093 __ orq($dst$$Address, $src$$constant); 10094 %} 10095 ins_pipe(ialu_mem_imm); 10096 %} 10097 10098 instruct btsL_mem_imm(memory dst, immL_Pow2 con, rFlagsReg cr) 10099 %{ 10100 // con should be a pure 64-bit power of 2 immediate 10101 // because AND/OR works well enough for 8/32-bit values. 
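// In other words: power-of-two constants whose set bit lies in the low 32 bits
// are already handled in one instruction by the immediate OR forms above, so
// btsq is only matched when the bit index is above 31 and the constant would
// otherwise have to be materialized in a register first. Rough sketch:
//
//   x |= 1L << 40;     =>   "btsq [mem], 40"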
10102 predicate(log2i_graceful(n->in(3)->in(2)->get_long()) > 31); 10103 10104 match(Set dst (StoreL dst (OrL (LoadL dst) con))); 10105 effect(KILL cr); 10106 10107 ins_cost(125); 10108 format %{ "btsq $dst, log2($con)\t# long" %} 10109 ins_encode %{ 10110 __ btsq($dst$$Address, log2i_exact((julong)$con$$constant)); 10111 %} 10112 ins_pipe(ialu_mem_imm); 10113 %} 10114 10115 // Xor Instructions 10116 // Xor Register with Register 10117 instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 10118 %{ 10119 match(Set dst (XorL dst src)); 10120 effect(KILL cr); 10121 10122 format %{ "xorq $dst, $src\t# long" %} 10123 ins_encode %{ 10124 __ xorq($dst$$Register, $src$$Register); 10125 %} 10126 ins_pipe(ialu_reg_reg); 10127 %} 10128 10129 // Xor Register with Immediate -1 10130 instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{ 10131 match(Set dst (XorL dst imm)); 10132 10133 format %{ "notq $dst" %} 10134 ins_encode %{ 10135 __ notq($dst$$Register); 10136 %} 10137 ins_pipe(ialu_reg); 10138 %} 10139 10140 // Xor Register with Immediate 10141 instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) 10142 %{ 10143 match(Set dst (XorL dst src)); 10144 effect(KILL cr); 10145 10146 format %{ "xorq $dst, $src\t# long" %} 10147 ins_encode %{ 10148 __ xorq($dst$$Register, $src$$constant); 10149 %} 10150 ins_pipe(ialu_reg); 10151 %} 10152 10153 // Xor Register with Memory 10154 instruct xorL_rReg_mem(rRegL dst, memory src, rFlagsReg cr) 10155 %{ 10156 match(Set dst (XorL dst (LoadL src))); 10157 effect(KILL cr); 10158 10159 ins_cost(125); 10160 format %{ "xorq $dst, $src\t# long" %} 10161 ins_encode %{ 10162 __ xorq($dst$$Register, $src$$Address); 10163 %} 10164 ins_pipe(ialu_reg_mem); 10165 %} 10166 10167 // Xor Memory with Register 10168 instruct xorL_mem_rReg(memory dst, rRegL src, rFlagsReg cr) 10169 %{ 10170 match(Set dst (StoreL dst (XorL (LoadL dst) src))); 10171 effect(KILL cr); 10172 10173 ins_cost(150); 10174 format %{ "xorq $dst, $src\t# long" %} 10175 ins_encode %{ 10176 __ xorq($dst$$Address, $src$$Register); 10177 %} 10178 ins_pipe(ialu_mem_reg); 10179 %} 10180 10181 // Xor Memory with Immediate 10182 instruct xorL_mem_imm(memory dst, immL32 src, rFlagsReg cr) 10183 %{ 10184 match(Set dst (StoreL dst (XorL (LoadL dst) src))); 10185 effect(KILL cr); 10186 10187 ins_cost(125); 10188 format %{ "xorq $dst, $src\t# long" %} 10189 ins_encode %{ 10190 __ xorq($dst$$Address, $src$$constant); 10191 %} 10192 ins_pipe(ialu_mem_imm); 10193 %} 10194 10195 // Convert Int to Boolean 10196 instruct convI2B(rRegI dst, rRegI src, rFlagsReg cr) 10197 %{ 10198 match(Set dst (Conv2B src)); 10199 effect(KILL cr); 10200 10201 format %{ "testl $src, $src\t# ci2b\n\t" 10202 "setnz $dst\n\t" 10203 "movzbl $dst, $dst" %} 10204 ins_encode %{ 10205 __ testl($src$$Register, $src$$Register); 10206 __ set_byte_if_not_zero($dst$$Register); 10207 __ movzbl($dst$$Register, $dst$$Register); 10208 %} 10209 ins_pipe(pipe_slow); // XXX 10210 %} 10211 10212 // Convert Pointer to Boolean 10213 instruct convP2B(rRegI dst, rRegP src, rFlagsReg cr) 10214 %{ 10215 match(Set dst (Conv2B src)); 10216 effect(KILL cr); 10217 10218 format %{ "testq $src, $src\t# cp2b\n\t" 10219 "setnz $dst\n\t" 10220 "movzbl $dst, $dst" %} 10221 ins_encode %{ 10222 __ testq($src$$Register, $src$$Register); 10223 __ set_byte_if_not_zero($dst$$Register); 10224 __ movzbl($dst$$Register, $dst$$Register); 10225 %} 10226 ins_pipe(pipe_slow); // XXX 10227 %} 10228 10229 instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr) 10230 %{ 10231 match(Set dst 
(CmpLTMask p q)); 10232 effect(KILL cr); 10233 10234 ins_cost(400); 10235 format %{ "cmpl $p, $q\t# cmpLTMask\n\t" 10236 "setlt $dst\n\t" 10237 "movzbl $dst, $dst\n\t" 10238 "negl $dst" %} 10239 ins_encode %{ 10240 __ cmpl($p$$Register, $q$$Register); 10241 __ setl($dst$$Register); 10242 __ movzbl($dst$$Register, $dst$$Register); 10243 __ negl($dst$$Register); 10244 %} 10245 ins_pipe(pipe_slow); 10246 %} 10247 10248 instruct cmpLTMask0(rRegI dst, immI_0 zero, rFlagsReg cr) 10249 %{ 10250 match(Set dst (CmpLTMask dst zero)); 10251 effect(KILL cr); 10252 10253 ins_cost(100); 10254 format %{ "sarl $dst, #31\t# cmpLTMask0" %} 10255 ins_encode %{ 10256 __ sarl($dst$$Register, 31); 10257 %} 10258 ins_pipe(ialu_reg); 10259 %} 10260 10261 /* Better to save a register than avoid a branch */ 10262 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr) 10263 %{ 10264 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 10265 effect(KILL cr); 10266 ins_cost(300); 10267 format %{ "subl $p,$q\t# cadd_cmpLTMask\n\t" 10268 "jge done\n\t" 10269 "addl $p,$y\n" 10270 "done: " %} 10271 ins_encode %{ 10272 Register Rp = $p$$Register; 10273 Register Rq = $q$$Register; 10274 Register Ry = $y$$Register; 10275 Label done; 10276 __ subl(Rp, Rq); 10277 __ jccb(Assembler::greaterEqual, done); 10278 __ addl(Rp, Ry); 10279 __ bind(done); 10280 %} 10281 ins_pipe(pipe_cmplt); 10282 %} 10283 10284 /* Better to save a register than avoid a branch */ 10285 instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr) 10286 %{ 10287 match(Set y (AndI (CmpLTMask p q) y)); 10288 effect(KILL cr); 10289 10290 ins_cost(300); 10291 10292 format %{ "cmpl $p, $q\t# and_cmpLTMask\n\t" 10293 "jlt done\n\t" 10294 "xorl $y, $y\n" 10295 "done: " %} 10296 ins_encode %{ 10297 Register Rp = $p$$Register; 10298 Register Rq = $q$$Register; 10299 Register Ry = $y$$Register; 10300 Label done; 10301 __ cmpl(Rp, Rq); 10302 __ jccb(Assembler::less, done); 10303 __ xorl(Ry, Ry); 10304 __ bind(done); 10305 %} 10306 ins_pipe(pipe_cmplt); 10307 %} 10308 10309 10310 //---------- FP Instructions------------------------------------------------ 10311 10312 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) 10313 %{ 10314 match(Set cr (CmpF src1 src2)); 10315 10316 ins_cost(145); 10317 format %{ "ucomiss $src1, $src2\n\t" 10318 "jnp,s exit\n\t" 10319 "pushfq\t# saw NaN, set CF\n\t" 10320 "andq [rsp], #0xffffff2b\n\t" 10321 "popfq\n" 10322 "exit:" %} 10323 ins_encode %{ 10324 __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister); 10325 emit_cmpfp_fixup(_masm); 10326 %} 10327 ins_pipe(pipe_slow); 10328 %} 10329 10330 instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{ 10331 match(Set cr (CmpF src1 src2)); 10332 10333 ins_cost(100); 10334 format %{ "ucomiss $src1, $src2" %} 10335 ins_encode %{ 10336 __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister); 10337 %} 10338 ins_pipe(pipe_slow); 10339 %} 10340 10341 instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2) 10342 %{ 10343 match(Set cr (CmpF src1 (LoadF src2))); 10344 10345 ins_cost(145); 10346 format %{ "ucomiss $src1, $src2\n\t" 10347 "jnp,s exit\n\t" 10348 "pushfq\t# saw NaN, set CF\n\t" 10349 "andq [rsp], #0xffffff2b\n\t" 10350 "popfq\n" 10351 "exit:" %} 10352 ins_encode %{ 10353 __ ucomiss($src1$$XMMRegister, $src2$$Address); 10354 emit_cmpfp_fixup(_masm); 10355 %} 10356 ins_pipe(pipe_slow); 10357 %} 10358 10359 instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{ 10360 match(Set cr (CmpF src1 (LoadF src2))); 10361 10362 ins_cost(100); 10363 format 
%{ "ucomiss $src1, $src2" %} 10364 ins_encode %{ 10365 __ ucomiss($src1$$XMMRegister, $src2$$Address); 10366 %} 10367 ins_pipe(pipe_slow); 10368 %} 10369 10370 instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{ 10371 match(Set cr (CmpF src con)); 10372 10373 ins_cost(145); 10374 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t" 10375 "jnp,s exit\n\t" 10376 "pushfq\t# saw NaN, set CF\n\t" 10377 "andq [rsp], #0xffffff2b\n\t" 10378 "popfq\n" 10379 "exit:" %} 10380 ins_encode %{ 10381 __ ucomiss($src$$XMMRegister, $constantaddress($con)); 10382 emit_cmpfp_fixup(_masm); 10383 %} 10384 ins_pipe(pipe_slow); 10385 %} 10386 10387 instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{ 10388 match(Set cr (CmpF src con)); 10389 ins_cost(100); 10390 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %} 10391 ins_encode %{ 10392 __ ucomiss($src$$XMMRegister, $constantaddress($con)); 10393 %} 10394 ins_pipe(pipe_slow); 10395 %} 10396 10397 instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2) 10398 %{ 10399 match(Set cr (CmpD src1 src2)); 10400 10401 ins_cost(145); 10402 format %{ "ucomisd $src1, $src2\n\t" 10403 "jnp,s exit\n\t" 10404 "pushfq\t# saw NaN, set CF\n\t" 10405 "andq [rsp], #0xffffff2b\n\t" 10406 "popfq\n" 10407 "exit:" %} 10408 ins_encode %{ 10409 __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister); 10410 emit_cmpfp_fixup(_masm); 10411 %} 10412 ins_pipe(pipe_slow); 10413 %} 10414 10415 instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{ 10416 match(Set cr (CmpD src1 src2)); 10417 10418 ins_cost(100); 10419 format %{ "ucomisd $src1, $src2 test" %} 10420 ins_encode %{ 10421 __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister); 10422 %} 10423 ins_pipe(pipe_slow); 10424 %} 10425 10426 instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2) 10427 %{ 10428 match(Set cr (CmpD src1 (LoadD src2))); 10429 10430 ins_cost(145); 10431 format %{ "ucomisd $src1, $src2\n\t" 10432 "jnp,s exit\n\t" 10433 "pushfq\t# saw NaN, set CF\n\t" 10434 "andq [rsp], #0xffffff2b\n\t" 10435 "popfq\n" 10436 "exit:" %} 10437 ins_encode %{ 10438 __ ucomisd($src1$$XMMRegister, $src2$$Address); 10439 emit_cmpfp_fixup(_masm); 10440 %} 10441 ins_pipe(pipe_slow); 10442 %} 10443 10444 instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{ 10445 match(Set cr (CmpD src1 (LoadD src2))); 10446 10447 ins_cost(100); 10448 format %{ "ucomisd $src1, $src2" %} 10449 ins_encode %{ 10450 __ ucomisd($src1$$XMMRegister, $src2$$Address); 10451 %} 10452 ins_pipe(pipe_slow); 10453 %} 10454 10455 instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{ 10456 match(Set cr (CmpD src con)); 10457 10458 ins_cost(145); 10459 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t" 10460 "jnp,s exit\n\t" 10461 "pushfq\t# saw NaN, set CF\n\t" 10462 "andq [rsp], #0xffffff2b\n\t" 10463 "popfq\n" 10464 "exit:" %} 10465 ins_encode %{ 10466 __ ucomisd($src$$XMMRegister, $constantaddress($con)); 10467 emit_cmpfp_fixup(_masm); 10468 %} 10469 ins_pipe(pipe_slow); 10470 %} 10471 10472 instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{ 10473 match(Set cr (CmpD src con)); 10474 ins_cost(100); 10475 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %} 10476 ins_encode %{ 10477 __ ucomisd($src$$XMMRegister, $constantaddress($con)); 10478 %} 10479 ins_pipe(pipe_slow); 10480 %} 10481 10482 // Compare into -1,0,1 10483 instruct cmpF_reg(rRegI dst, regF src1, regF src2, 
rFlagsReg cr) 10484 %{ 10485 match(Set dst (CmpF3 src1 src2)); 10486 effect(KILL cr); 10487 10488 ins_cost(275); 10489 format %{ "ucomiss $src1, $src2\n\t" 10490 "movl $dst, #-1\n\t" 10491 "jp,s done\n\t" 10492 "jb,s done\n\t" 10493 "setne $dst\n\t" 10494 "movzbl $dst, $dst\n" 10495 "done:" %} 10496 ins_encode %{ 10497 __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister); 10498 emit_cmpfp3(_masm, $dst$$Register); 10499 %} 10500 ins_pipe(pipe_slow); 10501 %} 10502 10503 // Compare into -1,0,1 10504 instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr) 10505 %{ 10506 match(Set dst (CmpF3 src1 (LoadF src2))); 10507 effect(KILL cr); 10508 10509 ins_cost(275); 10510 format %{ "ucomiss $src1, $src2\n\t" 10511 "movl $dst, #-1\n\t" 10512 "jp,s done\n\t" 10513 "jb,s done\n\t" 10514 "setne $dst\n\t" 10515 "movzbl $dst, $dst\n" 10516 "done:" %} 10517 ins_encode %{ 10518 __ ucomiss($src1$$XMMRegister, $src2$$Address); 10519 emit_cmpfp3(_masm, $dst$$Register); 10520 %} 10521 ins_pipe(pipe_slow); 10522 %} 10523 10524 // Compare into -1,0,1 10525 instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{ 10526 match(Set dst (CmpF3 src con)); 10527 effect(KILL cr); 10528 10529 ins_cost(275); 10530 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t" 10531 "movl $dst, #-1\n\t" 10532 "jp,s done\n\t" 10533 "jb,s done\n\t" 10534 "setne $dst\n\t" 10535 "movzbl $dst, $dst\n" 10536 "done:" %} 10537 ins_encode %{ 10538 __ ucomiss($src$$XMMRegister, $constantaddress($con)); 10539 emit_cmpfp3(_masm, $dst$$Register); 10540 %} 10541 ins_pipe(pipe_slow); 10542 %} 10543 10544 // Compare into -1,0,1 10545 instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr) 10546 %{ 10547 match(Set dst (CmpD3 src1 src2)); 10548 effect(KILL cr); 10549 10550 ins_cost(275); 10551 format %{ "ucomisd $src1, $src2\n\t" 10552 "movl $dst, #-1\n\t" 10553 "jp,s done\n\t" 10554 "jb,s done\n\t" 10555 "setne $dst\n\t" 10556 "movzbl $dst, $dst\n" 10557 "done:" %} 10558 ins_encode %{ 10559 __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister); 10560 emit_cmpfp3(_masm, $dst$$Register); 10561 %} 10562 ins_pipe(pipe_slow); 10563 %} 10564 10565 // Compare into -1,0,1 10566 instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr) 10567 %{ 10568 match(Set dst (CmpD3 src1 (LoadD src2))); 10569 effect(KILL cr); 10570 10571 ins_cost(275); 10572 format %{ "ucomisd $src1, $src2\n\t" 10573 "movl $dst, #-1\n\t" 10574 "jp,s done\n\t" 10575 "jb,s done\n\t" 10576 "setne $dst\n\t" 10577 "movzbl $dst, $dst\n" 10578 "done:" %} 10579 ins_encode %{ 10580 __ ucomisd($src1$$XMMRegister, $src2$$Address); 10581 emit_cmpfp3(_masm, $dst$$Register); 10582 %} 10583 ins_pipe(pipe_slow); 10584 %} 10585 10586 // Compare into -1,0,1 10587 instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{ 10588 match(Set dst (CmpD3 src con)); 10589 effect(KILL cr); 10590 10591 ins_cost(275); 10592 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t" 10593 "movl $dst, #-1\n\t" 10594 "jp,s done\n\t" 10595 "jb,s done\n\t" 10596 "setne $dst\n\t" 10597 "movzbl $dst, $dst\n" 10598 "done:" %} 10599 ins_encode %{ 10600 __ ucomisd($src$$XMMRegister, $constantaddress($con)); 10601 emit_cmpfp3(_masm, $dst$$Register); 10602 %} 10603 ins_pipe(pipe_slow); 10604 %} 10605 10606 //----------Arithmetic Conversion Instructions--------------------------------- 10607 10608 instruct convF2D_reg_reg(regD dst, regF src) 10609 %{ 10610 match(Set dst (ConvF2D src)); 10611 10612 format %{ "cvtss2sd $dst, 
$src" %} 10613 ins_encode %{ 10614 __ cvtss2sd ($dst$$XMMRegister, $src$$XMMRegister); 10615 %} 10616 ins_pipe(pipe_slow); // XXX 10617 %} 10618 10619 instruct convF2D_reg_mem(regD dst, memory src) 10620 %{ 10621 match(Set dst (ConvF2D (LoadF src))); 10622 10623 format %{ "cvtss2sd $dst, $src" %} 10624 ins_encode %{ 10625 __ cvtss2sd ($dst$$XMMRegister, $src$$Address); 10626 %} 10627 ins_pipe(pipe_slow); // XXX 10628 %} 10629 10630 instruct convD2F_reg_reg(regF dst, regD src) 10631 %{ 10632 match(Set dst (ConvD2F src)); 10633 10634 format %{ "cvtsd2ss $dst, $src" %} 10635 ins_encode %{ 10636 __ cvtsd2ss ($dst$$XMMRegister, $src$$XMMRegister); 10637 %} 10638 ins_pipe(pipe_slow); // XXX 10639 %} 10640 10641 instruct convD2F_reg_mem(regF dst, memory src) 10642 %{ 10643 match(Set dst (ConvD2F (LoadD src))); 10644 10645 format %{ "cvtsd2ss $dst, $src" %} 10646 ins_encode %{ 10647 __ cvtsd2ss ($dst$$XMMRegister, $src$$Address); 10648 %} 10649 ins_pipe(pipe_slow); // XXX 10650 %} 10651 10652 // XXX do mem variants 10653 instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr) 10654 %{ 10655 match(Set dst (ConvF2I src)); 10656 effect(KILL cr); 10657 format %{ "convert_f2i $dst,$src" %} 10658 ins_encode %{ 10659 __ convert_f2i($dst$$Register, $src$$XMMRegister); 10660 %} 10661 ins_pipe(pipe_slow); 10662 %} 10663 10664 instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr) 10665 %{ 10666 match(Set dst (ConvF2L src)); 10667 effect(KILL cr); 10668 format %{ "convert_f2l $dst,$src"%} 10669 ins_encode %{ 10670 __ convert_f2l($dst$$Register, $src$$XMMRegister); 10671 %} 10672 ins_pipe(pipe_slow); 10673 %} 10674 10675 instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr) 10676 %{ 10677 match(Set dst (ConvD2I src)); 10678 effect(KILL cr); 10679 format %{ "convert_d2i $dst,$src"%} 10680 ins_encode %{ 10681 __ convert_d2i($dst$$Register, $src$$XMMRegister); 10682 %} 10683 ins_pipe(pipe_slow); 10684 %} 10685 10686 instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr) 10687 %{ 10688 match(Set dst (ConvD2L src)); 10689 effect(KILL cr); 10690 format %{ "convert_d2l $dst,$src"%} 10691 ins_encode %{ 10692 __ convert_d2l($dst$$Register, $src$$XMMRegister); 10693 %} 10694 ins_pipe(pipe_slow); 10695 %} 10696 10697 instruct convI2F_reg_reg(regF dst, rRegI src) 10698 %{ 10699 predicate(!UseXmmI2F); 10700 match(Set dst (ConvI2F src)); 10701 10702 format %{ "cvtsi2ssl $dst, $src\t# i2f" %} 10703 ins_encode %{ 10704 __ cvtsi2ssl ($dst$$XMMRegister, $src$$Register); 10705 %} 10706 ins_pipe(pipe_slow); // XXX 10707 %} 10708 10709 instruct convI2F_reg_mem(regF dst, memory src) 10710 %{ 10711 match(Set dst (ConvI2F (LoadI src))); 10712 10713 format %{ "cvtsi2ssl $dst, $src\t# i2f" %} 10714 ins_encode %{ 10715 __ cvtsi2ssl ($dst$$XMMRegister, $src$$Address); 10716 %} 10717 ins_pipe(pipe_slow); // XXX 10718 %} 10719 10720 instruct convI2D_reg_reg(regD dst, rRegI src) 10721 %{ 10722 predicate(!UseXmmI2D); 10723 match(Set dst (ConvI2D src)); 10724 10725 format %{ "cvtsi2sdl $dst, $src\t# i2d" %} 10726 ins_encode %{ 10727 __ cvtsi2sdl ($dst$$XMMRegister, $src$$Register); 10728 %} 10729 ins_pipe(pipe_slow); // XXX 10730 %} 10731 10732 instruct convI2D_reg_mem(regD dst, memory src) 10733 %{ 10734 match(Set dst (ConvI2D (LoadI src))); 10735 10736 format %{ "cvtsi2sdl $dst, $src\t# i2d" %} 10737 ins_encode %{ 10738 __ cvtsi2sdl ($dst$$XMMRegister, $src$$Address); 10739 %} 10740 ins_pipe(pipe_slow); // XXX 10741 %} 10742 10743 instruct convXI2F_reg(regF dst, rRegI src) 10744 %{ 10745 predicate(UseXmmI2F); 10746 
match(Set dst (ConvI2F src)); 10747 10748 format %{ "movdl $dst, $src\n\t" 10749 "cvtdq2psl $dst, $dst\t# i2f" %} 10750 ins_encode %{ 10751 __ movdl($dst$$XMMRegister, $src$$Register); 10752 __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister); 10753 %} 10754 ins_pipe(pipe_slow); // XXX 10755 %} 10756 10757 instruct convXI2D_reg(regD dst, rRegI src) 10758 %{ 10759 predicate(UseXmmI2D); 10760 match(Set dst (ConvI2D src)); 10761 10762 format %{ "movdl $dst, $src\n\t" 10763 "cvtdq2pdl $dst, $dst\t# i2d" %} 10764 ins_encode %{ 10765 __ movdl($dst$$XMMRegister, $src$$Register); 10766 __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister); 10767 %} 10768 ins_pipe(pipe_slow); // XXX 10769 %} 10770 10771 instruct convL2F_reg_reg(regF dst, rRegL src) 10772 %{ 10773 match(Set dst (ConvL2F src)); 10774 10775 format %{ "cvtsi2ssq $dst, $src\t# l2f" %} 10776 ins_encode %{ 10777 __ cvtsi2ssq ($dst$$XMMRegister, $src$$Register); 10778 %} 10779 ins_pipe(pipe_slow); // XXX 10780 %} 10781 10782 instruct convL2F_reg_mem(regF dst, memory src) 10783 %{ 10784 match(Set dst (ConvL2F (LoadL src))); 10785 10786 format %{ "cvtsi2ssq $dst, $src\t# l2f" %} 10787 ins_encode %{ 10788 __ cvtsi2ssq ($dst$$XMMRegister, $src$$Address); 10789 %} 10790 ins_pipe(pipe_slow); // XXX 10791 %} 10792 10793 instruct convL2D_reg_reg(regD dst, rRegL src) 10794 %{ 10795 match(Set dst (ConvL2D src)); 10796 10797 format %{ "cvtsi2sdq $dst, $src\t# l2d" %} 10798 ins_encode %{ 10799 __ cvtsi2sdq ($dst$$XMMRegister, $src$$Register); 10800 %} 10801 ins_pipe(pipe_slow); // XXX 10802 %} 10803 10804 instruct convL2D_reg_mem(regD dst, memory src) 10805 %{ 10806 match(Set dst (ConvL2D (LoadL src))); 10807 10808 format %{ "cvtsi2sdq $dst, $src\t# l2d" %} 10809 ins_encode %{ 10810 __ cvtsi2sdq ($dst$$XMMRegister, $src$$Address); 10811 %} 10812 ins_pipe(pipe_slow); // XXX 10813 %} 10814 10815 instruct convI2L_reg_reg(rRegL dst, rRegI src) 10816 %{ 10817 match(Set dst (ConvI2L src)); 10818 10819 ins_cost(125); 10820 format %{ "movslq $dst, $src\t# i2l" %} 10821 ins_encode %{ 10822 __ movslq($dst$$Register, $src$$Register); 10823 %} 10824 ins_pipe(ialu_reg_reg); 10825 %} 10826 10827 // instruct convI2L_reg_reg_foo(rRegL dst, rRegI src) 10828 // %{ 10829 // match(Set dst (ConvI2L src)); 10830 // // predicate(_kids[0]->_leaf->as_Type()->type()->is_int()->_lo >= 0 && 10831 // // _kids[0]->_leaf->as_Type()->type()->is_int()->_hi >= 0); 10832 // predicate(((const TypeNode*) n)->type()->is_long()->_hi == 10833 // (unsigned int) ((const TypeNode*) n)->type()->is_long()->_hi && 10834 // ((const TypeNode*) n)->type()->is_long()->_lo == 10835 // (unsigned int) ((const TypeNode*) n)->type()->is_long()->_lo); 10836 10837 // format %{ "movl $dst, $src\t# unsigned i2l" %} 10838 // ins_encode(enc_copy(dst, src)); 10839 // // opcode(0x63); // needs REX.W 10840 // // ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src)); 10841 // ins_pipe(ialu_reg_reg); 10842 // %} 10843 10844 // Zero-extend convert int to long 10845 instruct convI2L_reg_reg_zex(rRegL dst, rRegI src, immL_32bits mask) 10846 %{ 10847 match(Set dst (AndL (ConvI2L src) mask)); 10848 10849 format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %} 10850 ins_encode %{ 10851 if ($dst$$reg != $src$$reg) { 10852 __ movl($dst$$Register, $src$$Register); 10853 } 10854 %} 10855 ins_pipe(ialu_reg_reg); 10856 %} 10857 10858 // Zero-extend convert int to long 10859 instruct convI2L_reg_mem_zex(rRegL dst, memory src, immL_32bits mask) 10860 %{ 10861 match(Set dst (AndL (ConvI2L (LoadI src)) mask)); 10862 10863 format 
%{ "movl $dst, $src\t# i2l zero-extend\n\t" %} 10864 ins_encode %{ 10865 __ movl($dst$$Register, $src$$Address); 10866 %} 10867 ins_pipe(ialu_reg_mem); 10868 %} 10869 10870 instruct zerox_long_reg_reg(rRegL dst, rRegL src, immL_32bits mask) 10871 %{ 10872 match(Set dst (AndL src mask)); 10873 10874 format %{ "movl $dst, $src\t# zero-extend long" %} 10875 ins_encode %{ 10876 __ movl($dst$$Register, $src$$Register); 10877 %} 10878 ins_pipe(ialu_reg_reg); 10879 %} 10880 10881 instruct convL2I_reg_reg(rRegI dst, rRegL src) 10882 %{ 10883 match(Set dst (ConvL2I src)); 10884 10885 format %{ "movl $dst, $src\t# l2i" %} 10886 ins_encode %{ 10887 __ movl($dst$$Register, $src$$Register); 10888 %} 10889 ins_pipe(ialu_reg_reg); 10890 %} 10891 10892 10893 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{ 10894 match(Set dst (MoveF2I src)); 10895 effect(DEF dst, USE src); 10896 10897 ins_cost(125); 10898 format %{ "movl $dst, $src\t# MoveF2I_stack_reg" %} 10899 ins_encode %{ 10900 __ movl($dst$$Register, Address(rsp, $src$$disp)); 10901 %} 10902 ins_pipe(ialu_reg_mem); 10903 %} 10904 10905 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 10906 match(Set dst (MoveI2F src)); 10907 effect(DEF dst, USE src); 10908 10909 ins_cost(125); 10910 format %{ "movss $dst, $src\t# MoveI2F_stack_reg" %} 10911 ins_encode %{ 10912 __ movflt($dst$$XMMRegister, Address(rsp, $src$$disp)); 10913 %} 10914 ins_pipe(pipe_slow); 10915 %} 10916 10917 instruct MoveD2L_stack_reg(rRegL dst, stackSlotD src) %{ 10918 match(Set dst (MoveD2L src)); 10919 effect(DEF dst, USE src); 10920 10921 ins_cost(125); 10922 format %{ "movq $dst, $src\t# MoveD2L_stack_reg" %} 10923 ins_encode %{ 10924 __ movq($dst$$Register, Address(rsp, $src$$disp)); 10925 %} 10926 ins_pipe(ialu_reg_mem); 10927 %} 10928 10929 instruct MoveL2D_stack_reg_partial(regD dst, stackSlotL src) %{ 10930 predicate(!UseXmmLoadAndClearUpper); 10931 match(Set dst (MoveL2D src)); 10932 effect(DEF dst, USE src); 10933 10934 ins_cost(125); 10935 format %{ "movlpd $dst, $src\t# MoveL2D_stack_reg" %} 10936 ins_encode %{ 10937 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp)); 10938 %} 10939 ins_pipe(pipe_slow); 10940 %} 10941 10942 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 10943 predicate(UseXmmLoadAndClearUpper); 10944 match(Set dst (MoveL2D src)); 10945 effect(DEF dst, USE src); 10946 10947 ins_cost(125); 10948 format %{ "movsd $dst, $src\t# MoveL2D_stack_reg" %} 10949 ins_encode %{ 10950 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp)); 10951 %} 10952 ins_pipe(pipe_slow); 10953 %} 10954 10955 10956 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 10957 match(Set dst (MoveF2I src)); 10958 effect(DEF dst, USE src); 10959 10960 ins_cost(95); // XXX 10961 format %{ "movss $dst, $src\t# MoveF2I_reg_stack" %} 10962 ins_encode %{ 10963 __ movflt(Address(rsp, $dst$$disp), $src$$XMMRegister); 10964 %} 10965 ins_pipe(pipe_slow); 10966 %} 10967 10968 instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{ 10969 match(Set dst (MoveI2F src)); 10970 effect(DEF dst, USE src); 10971 10972 ins_cost(100); 10973 format %{ "movl $dst, $src\t# MoveI2F_reg_stack" %} 10974 ins_encode %{ 10975 __ movl(Address(rsp, $dst$$disp), $src$$Register); 10976 %} 10977 ins_pipe( ialu_mem_reg ); 10978 %} 10979 10980 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 10981 match(Set dst (MoveD2L src)); 10982 effect(DEF dst, USE src); 10983 10984 ins_cost(95); // XXX 10985 format %{ "movsd $dst, $src\t# MoveL2D_reg_stack" %} 10986 ins_encode %{ 10987 __ 
movdbl(Address(rsp, $dst$$disp), $src$$XMMRegister); 10988 %} 10989 ins_pipe(pipe_slow); 10990 %} 10991 10992 instruct MoveL2D_reg_stack(stackSlotD dst, rRegL src) %{ 10993 match(Set dst (MoveL2D src)); 10994 effect(DEF dst, USE src); 10995 10996 ins_cost(100); 10997 format %{ "movq $dst, $src\t# MoveL2D_reg_stack" %} 10998 ins_encode %{ 10999 __ movq(Address(rsp, $dst$$disp), $src$$Register); 11000 %} 11001 ins_pipe(ialu_mem_reg); 11002 %} 11003 11004 instruct MoveF2I_reg_reg(rRegI dst, regF src) %{ 11005 match(Set dst (MoveF2I src)); 11006 effect(DEF dst, USE src); 11007 ins_cost(85); 11008 format %{ "movd $dst,$src\t# MoveF2I" %} 11009 ins_encode %{ 11010 __ movdl($dst$$Register, $src$$XMMRegister); 11011 %} 11012 ins_pipe( pipe_slow ); 11013 %} 11014 11015 instruct MoveD2L_reg_reg(rRegL dst, regD src) %{ 11016 match(Set dst (MoveD2L src)); 11017 effect(DEF dst, USE src); 11018 ins_cost(85); 11019 format %{ "movd $dst,$src\t# MoveD2L" %} 11020 ins_encode %{ 11021 __ movdq($dst$$Register, $src$$XMMRegister); 11022 %} 11023 ins_pipe( pipe_slow ); 11024 %} 11025 11026 instruct MoveI2F_reg_reg(regF dst, rRegI src) %{ 11027 match(Set dst (MoveI2F src)); 11028 effect(DEF dst, USE src); 11029 ins_cost(100); 11030 format %{ "movd $dst,$src\t# MoveI2F" %} 11031 ins_encode %{ 11032 __ movdl($dst$$XMMRegister, $src$$Register); 11033 %} 11034 ins_pipe( pipe_slow ); 11035 %} 11036 11037 instruct MoveL2D_reg_reg(regD dst, rRegL src) %{ 11038 match(Set dst (MoveL2D src)); 11039 effect(DEF dst, USE src); 11040 ins_cost(100); 11041 format %{ "movd $dst,$src\t# MoveL2D" %} 11042 ins_encode %{ 11043 __ movdq($dst$$XMMRegister, $src$$Register); 11044 %} 11045 ins_pipe( pipe_slow ); 11046 %} 11047 11048 // Fast clearing of an array 11049 // Small ClearArray non-AVX512. 
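// ClearArray is matched by several rules below: small vs. large arrays
// (is_large()), pre-AVX-512 (UseAVX <= 2) vs. AVX-512 (UseAVX > 2, extra kReg
// temp), plus a small constant-length AVX-512 form.  cnt is the number of
// 8-byte words to zero, so each rule is in effect "while (cnt--) *base++ = 0",
// implemented with rep stosb/stosq or a 32-byte YMM store loop depending on
// UseFastStosb / UseXMMForObjInit.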
11050 instruct rep_stos(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero, 11051 Universe dummy, rFlagsReg cr) 11052 %{ 11053 predicate(!((ClearArrayNode*)n)->is_large() && (UseAVX <= 2)); 11054 match(Set dummy (ClearArray cnt base)); 11055 effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr); 11056 11057 format %{ $$template 11058 $$emit$$"xorq rax, rax\t# ClearArray:\n\t" 11059 $$emit$$"cmp InitArrayShortSize,rcx\n\t" 11060 $$emit$$"jg LARGE\n\t" 11061 $$emit$$"dec rcx\n\t" 11062 $$emit$$"js DONE\t# Zero length\n\t" 11063 $$emit$$"mov rax,(rdi,rcx,8)\t# LOOP\n\t" 11064 $$emit$$"dec rcx\n\t" 11065 $$emit$$"jge LOOP\n\t" 11066 $$emit$$"jmp DONE\n\t" 11067 $$emit$$"# LARGE:\n\t" 11068 if (UseFastStosb) { 11069 $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t" 11070 $$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--\n\t" 11071 } else if (UseXMMForObjInit) { 11072 $$emit$$"mov rdi,rax\n\t" 11073 $$emit$$"vpxor ymm0,ymm0,ymm0\n\t" 11074 $$emit$$"jmpq L_zero_64_bytes\n\t" 11075 $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" 11076 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11077 $$emit$$"vmovdqu ymm0,0x20(rax)\n\t" 11078 $$emit$$"add 0x40,rax\n\t" 11079 $$emit$$"# L_zero_64_bytes:\n\t" 11080 $$emit$$"sub 0x8,rcx\n\t" 11081 $$emit$$"jge L_loop\n\t" 11082 $$emit$$"add 0x4,rcx\n\t" 11083 $$emit$$"jl L_tail\n\t" 11084 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11085 $$emit$$"add 0x20,rax\n\t" 11086 $$emit$$"sub 0x4,rcx\n\t" 11087 $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" 11088 $$emit$$"add 0x4,rcx\n\t" 11089 $$emit$$"jle L_end\n\t" 11090 $$emit$$"dec rcx\n\t" 11091 $$emit$$"# L_sloop:\t# 8-byte short loop\n\t" 11092 $$emit$$"vmovq xmm0,(rax)\n\t" 11093 $$emit$$"add 0x8,rax\n\t" 11094 $$emit$$"dec rcx\n\t" 11095 $$emit$$"jge L_sloop\n\t" 11096 $$emit$$"# L_end:\n\t" 11097 } else { 11098 $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--\n\t" 11099 } 11100 $$emit$$"# DONE" 11101 %} 11102 ins_encode %{ 11103 __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, 11104 $tmp$$XMMRegister, false, knoreg); 11105 %} 11106 ins_pipe(pipe_slow); 11107 %} 11108 11109 // Small ClearArray AVX512 non-constant length. 
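// Same expansion as rep_stos above; the only difference is the extra kReg
// temporary handed to clear_mem, presumably used for masked stores when the
// AVX-512 path handles the tail of the region.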
11110 instruct rep_stos_evex(rcx_RegL cnt, rdi_RegP base, legRegD tmp, kReg ktmp, rax_RegI zero, 11111 Universe dummy, rFlagsReg cr) 11112 %{ 11113 predicate(!((ClearArrayNode*)n)->is_large() && (UseAVX > 2)); 11114 match(Set dummy (ClearArray cnt base)); 11115 ins_cost(125); 11116 effect(USE_KILL cnt, USE_KILL base, TEMP tmp, TEMP ktmp, KILL zero, KILL cr); 11117 11118 format %{ $$template 11119 $$emit$$"xorq rax, rax\t# ClearArray:\n\t" 11120 $$emit$$"cmp InitArrayShortSize,rcx\n\t" 11121 $$emit$$"jg LARGE\n\t" 11122 $$emit$$"dec rcx\n\t" 11123 $$emit$$"js DONE\t# Zero length\n\t" 11124 $$emit$$"mov rax,(rdi,rcx,8)\t# LOOP\n\t" 11125 $$emit$$"dec rcx\n\t" 11126 $$emit$$"jge LOOP\n\t" 11127 $$emit$$"jmp DONE\n\t" 11128 $$emit$$"# LARGE:\n\t" 11129 if (UseFastStosb) { 11130 $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t" 11131 $$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--\n\t" 11132 } else if (UseXMMForObjInit) { 11133 $$emit$$"mov rdi,rax\n\t" 11134 $$emit$$"vpxor ymm0,ymm0,ymm0\n\t" 11135 $$emit$$"jmpq L_zero_64_bytes\n\t" 11136 $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" 11137 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11138 $$emit$$"vmovdqu ymm0,0x20(rax)\n\t" 11139 $$emit$$"add 0x40,rax\n\t" 11140 $$emit$$"# L_zero_64_bytes:\n\t" 11141 $$emit$$"sub 0x8,rcx\n\t" 11142 $$emit$$"jge L_loop\n\t" 11143 $$emit$$"add 0x4,rcx\n\t" 11144 $$emit$$"jl L_tail\n\t" 11145 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11146 $$emit$$"add 0x20,rax\n\t" 11147 $$emit$$"sub 0x4,rcx\n\t" 11148 $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" 11149 $$emit$$"add 0x4,rcx\n\t" 11150 $$emit$$"jle L_end\n\t" 11151 $$emit$$"dec rcx\n\t" 11152 $$emit$$"# L_sloop:\t# 8-byte short loop\n\t" 11153 $$emit$$"vmovq xmm0,(rax)\n\t" 11154 $$emit$$"add 0x8,rax\n\t" 11155 $$emit$$"dec rcx\n\t" 11156 $$emit$$"jge L_sloop\n\t" 11157 $$emit$$"# L_end:\n\t" 11158 } else { 11159 $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--\n\t" 11160 } 11161 $$emit$$"# DONE" 11162 %} 11163 ins_encode %{ 11164 __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, 11165 $tmp$$XMMRegister, false, $ktmp$$KRegister); 11166 %} 11167 ins_pipe(pipe_slow); 11168 %} 11169 11170 // Large ClearArray non-AVX512. 
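// The large variants pass 'true' rather than 'false' as the is-large argument
// to clear_mem, skipping the short-length special case and going straight to
// the bulk rep stos / YMM store loop.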
11171 instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero, 11172 Universe dummy, rFlagsReg cr) 11173 %{ 11174 predicate((UseAVX <=2) && ((ClearArrayNode*)n)->is_large()); 11175 match(Set dummy (ClearArray cnt base)); 11176 effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr); 11177 11178 format %{ $$template 11179 if (UseFastStosb) { 11180 $$emit$$"xorq rax, rax\t# ClearArray:\n\t" 11181 $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t" 11182 $$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--" 11183 } else if (UseXMMForObjInit) { 11184 $$emit$$"mov rdi,rax\t# ClearArray:\n\t" 11185 $$emit$$"vpxor ymm0,ymm0,ymm0\n\t" 11186 $$emit$$"jmpq L_zero_64_bytes\n\t" 11187 $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" 11188 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11189 $$emit$$"vmovdqu ymm0,0x20(rax)\n\t" 11190 $$emit$$"add 0x40,rax\n\t" 11191 $$emit$$"# L_zero_64_bytes:\n\t" 11192 $$emit$$"sub 0x8,rcx\n\t" 11193 $$emit$$"jge L_loop\n\t" 11194 $$emit$$"add 0x4,rcx\n\t" 11195 $$emit$$"jl L_tail\n\t" 11196 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11197 $$emit$$"add 0x20,rax\n\t" 11198 $$emit$$"sub 0x4,rcx\n\t" 11199 $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" 11200 $$emit$$"add 0x4,rcx\n\t" 11201 $$emit$$"jle L_end\n\t" 11202 $$emit$$"dec rcx\n\t" 11203 $$emit$$"# L_sloop:\t# 8-byte short loop\n\t" 11204 $$emit$$"vmovq xmm0,(rax)\n\t" 11205 $$emit$$"add 0x8,rax\n\t" 11206 $$emit$$"dec rcx\n\t" 11207 $$emit$$"jge L_sloop\n\t" 11208 $$emit$$"# L_end:\n\t" 11209 } else { 11210 $$emit$$"xorq rax, rax\t# ClearArray:\n\t" 11211 $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--" 11212 } 11213 %} 11214 ins_encode %{ 11215 __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, 11216 $tmp$$XMMRegister, true, knoreg); 11217 %} 11218 ins_pipe(pipe_slow); 11219 %} 11220 11221 // Large ClearArray AVX512. 
11222 instruct rep_stos_large_evex(rcx_RegL cnt, rdi_RegP base, legRegD tmp, kReg ktmp, rax_RegI zero, 11223 Universe dummy, rFlagsReg cr) 11224 %{ 11225 predicate((UseAVX > 2) && ((ClearArrayNode*)n)->is_large()); 11226 match(Set dummy (ClearArray cnt base)); 11227 effect(USE_KILL cnt, USE_KILL base, TEMP tmp, TEMP ktmp, KILL zero, KILL cr); 11228 11229 format %{ $$template 11230 if (UseFastStosb) { 11231 $$emit$$"xorq rax, rax\t# ClearArray:\n\t" 11232 $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t" 11233 $$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--" 11234 } else if (UseXMMForObjInit) { 11235 $$emit$$"mov rdi,rax\t# ClearArray:\n\t" 11236 $$emit$$"vpxor ymm0,ymm0,ymm0\n\t" 11237 $$emit$$"jmpq L_zero_64_bytes\n\t" 11238 $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" 11239 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11240 $$emit$$"vmovdqu ymm0,0x20(rax)\n\t" 11241 $$emit$$"add 0x40,rax\n\t" 11242 $$emit$$"# L_zero_64_bytes:\n\t" 11243 $$emit$$"sub 0x8,rcx\n\t" 11244 $$emit$$"jge L_loop\n\t" 11245 $$emit$$"add 0x4,rcx\n\t" 11246 $$emit$$"jl L_tail\n\t" 11247 $$emit$$"vmovdqu ymm0,(rax)\n\t" 11248 $$emit$$"add 0x20,rax\n\t" 11249 $$emit$$"sub 0x4,rcx\n\t" 11250 $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" 11251 $$emit$$"add 0x4,rcx\n\t" 11252 $$emit$$"jle L_end\n\t" 11253 $$emit$$"dec rcx\n\t" 11254 $$emit$$"# L_sloop:\t# 8-byte short loop\n\t" 11255 $$emit$$"vmovq xmm0,(rax)\n\t" 11256 $$emit$$"add 0x8,rax\n\t" 11257 $$emit$$"dec rcx\n\t" 11258 $$emit$$"jge L_sloop\n\t" 11259 $$emit$$"# L_end:\n\t" 11260 } else { 11261 $$emit$$"xorq rax, rax\t# ClearArray:\n\t" 11262 $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--" 11263 } 11264 %} 11265 ins_encode %{ 11266 __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, 11267 $tmp$$XMMRegister, true, $ktmp$$KRegister); 11268 %} 11269 ins_pipe(pipe_slow); 11270 %} 11271 11272 // Small ClearArray AVX512 constant length. 
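// Constant-length form: the count is a compile-time constant, so clear_mem is
// called with $cnt$$constant and can emit a fixed store sequence; it is only
// matched when AVX-512 VL/BW is available (see the predicate).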
11273 instruct rep_stos_im(immL cnt, rRegP base, regD tmp, rRegI zero, kReg ktmp, Universe dummy, rFlagsReg cr) 11274 %{ 11275 predicate(!((ClearArrayNode*)n)->is_large() && 11276 ((UseAVX > 2) && VM_Version::supports_avx512vlbw())); 11277 match(Set dummy (ClearArray cnt base)); 11278 ins_cost(100); 11279 effect(TEMP tmp, TEMP zero, TEMP ktmp, KILL cr); 11280 format %{ "clear_mem_imm $base , $cnt \n\t" %} 11281 ins_encode %{ 11282 __ clear_mem($base$$Register, $cnt$$constant, $zero$$Register, $tmp$$XMMRegister, $ktmp$$KRegister); 11283 %} 11284 ins_pipe(pipe_slow); 11285 %} 11286 11287 instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11288 rax_RegI result, legRegD tmp1, rFlagsReg cr) 11289 %{ 11290 predicate(!VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL); 11291 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11292 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11293 11294 format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11295 ins_encode %{ 11296 __ string_compare($str1$$Register, $str2$$Register, 11297 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11298 $tmp1$$XMMRegister, StrIntrinsicNode::LL, knoreg); 11299 %} 11300 ins_pipe( pipe_slow ); 11301 %} 11302 11303 instruct string_compareL_evex(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11304 rax_RegI result, legRegD tmp1, kReg ktmp, rFlagsReg cr) 11305 %{ 11306 predicate(VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL); 11307 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11308 effect(TEMP tmp1, TEMP ktmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11309 11310 format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11311 ins_encode %{ 11312 __ string_compare($str1$$Register, $str2$$Register, 11313 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11314 $tmp1$$XMMRegister, StrIntrinsicNode::LL, $ktmp$$KRegister); 11315 %} 11316 ins_pipe( pipe_slow ); 11317 %} 11318 11319 instruct string_compareU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11320 rax_RegI result, legRegD tmp1, rFlagsReg cr) 11321 %{ 11322 predicate(!VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU); 11323 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11324 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11325 11326 format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11327 ins_encode %{ 11328 __ string_compare($str1$$Register, $str2$$Register, 11329 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11330 $tmp1$$XMMRegister, StrIntrinsicNode::UU, knoreg); 11331 %} 11332 ins_pipe( pipe_slow ); 11333 %} 11334 11335 instruct string_compareU_evex(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11336 rax_RegI result, legRegD tmp1, kReg ktmp, rFlagsReg cr) 11337 %{ 11338 predicate(VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU); 11339 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11340 effect(TEMP tmp1, TEMP ktmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11341 11342 format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11343 ins_encode %{ 11344 __ string_compare($str1$$Register, 
$str2$$Register, 11345 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11346 $tmp1$$XMMRegister, StrIntrinsicNode::UU, $ktmp$$KRegister); 11347 %} 11348 ins_pipe( pipe_slow ); 11349 %} 11350 11351 instruct string_compareLU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11352 rax_RegI result, legRegD tmp1, rFlagsReg cr) 11353 %{ 11354 predicate(!VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU); 11355 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11356 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11357 11358 format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11359 ins_encode %{ 11360 __ string_compare($str1$$Register, $str2$$Register, 11361 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11362 $tmp1$$XMMRegister, StrIntrinsicNode::LU, knoreg); 11363 %} 11364 ins_pipe( pipe_slow ); 11365 %} 11366 11367 instruct string_compareLU_evex(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11368 rax_RegI result, legRegD tmp1, kReg ktmp, rFlagsReg cr) 11369 %{ 11370 predicate(VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU); 11371 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11372 effect(TEMP tmp1, TEMP ktmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11373 11374 format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11375 ins_encode %{ 11376 __ string_compare($str1$$Register, $str2$$Register, 11377 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11378 $tmp1$$XMMRegister, StrIntrinsicNode::LU, $ktmp$$KRegister); 11379 %} 11380 ins_pipe( pipe_slow ); 11381 %} 11382 11383 instruct string_compareUL(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI cnt2, 11384 rax_RegI result, legRegD tmp1, rFlagsReg cr) 11385 %{ 11386 predicate(!VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL); 11387 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11388 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11389 11390 format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11391 ins_encode %{ 11392 __ string_compare($str2$$Register, $str1$$Register, 11393 $cnt2$$Register, $cnt1$$Register, $result$$Register, 11394 $tmp1$$XMMRegister, StrIntrinsicNode::UL, knoreg); 11395 %} 11396 ins_pipe( pipe_slow ); 11397 %} 11398 11399 instruct string_compareUL_evex(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI cnt2, 11400 rax_RegI result, legRegD tmp1, kReg ktmp, rFlagsReg cr) 11401 %{ 11402 predicate(VM_Version::supports_avx512vlbw() && ((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL); 11403 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11404 effect(TEMP tmp1, TEMP ktmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11405 11406 format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11407 ins_encode %{ 11408 __ string_compare($str2$$Register, $str1$$Register, 11409 $cnt2$$Register, $cnt1$$Register, $result$$Register, 11410 $tmp1$$XMMRegister, StrIntrinsicNode::UL, $ktmp$$KRegister); 11411 %} 11412 ins_pipe( pipe_slow ); 11413 %} 11414 11415 // fast search of substring with known size. 
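// Naming convention for the String intrinsics below: the L/U suffixes give the
// encodings of the operands (L = Latin-1, one byte per element; U = UTF-16, two
// bytes), so e.g. UL works on a UTF-16 string and a Latin-1 one.  The _con
// forms are for a needle whose length is a compile-time constant: needles of at
// least 16 (LL) or 8 (UU/UL) elements go through string_indexofC8, which per
// the comments below does not need to load the needle through the stack.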
11416 instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2, 11417 rbx_RegI result, legRegD tmp_vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr) 11418 %{ 11419 predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL)); 11420 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); 11421 effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); 11422 11423 format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $tmp_vec, $cnt1, $cnt2, $tmp" %} 11424 ins_encode %{ 11425 int icnt2 = (int)$int_cnt2$$constant; 11426 if (icnt2 >= 16) { 11427 // IndexOf for constant substrings with size >= 16 elements 11428 // which don't need to be loaded through stack. 11429 __ string_indexofC8($str1$$Register, $str2$$Register, 11430 $cnt1$$Register, $cnt2$$Register, 11431 icnt2, $result$$Register, 11432 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL); 11433 } else { 11434 // Small strings are loaded through stack if they cross page boundary. 11435 __ string_indexof($str1$$Register, $str2$$Register, 11436 $cnt1$$Register, $cnt2$$Register, 11437 icnt2, $result$$Register, 11438 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL); 11439 } 11440 %} 11441 ins_pipe( pipe_slow ); 11442 %} 11443 11444 // fast search of substring with known size. 11445 instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2, 11446 rbx_RegI result, legRegD tmp_vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr) 11447 %{ 11448 predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU)); 11449 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); 11450 effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); 11451 11452 format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $tmp_vec, $cnt1, $cnt2, $tmp" %} 11453 ins_encode %{ 11454 int icnt2 = (int)$int_cnt2$$constant; 11455 if (icnt2 >= 8) { 11456 // IndexOf for constant substrings with size >= 8 elements 11457 // which don't need to be loaded through stack. 11458 __ string_indexofC8($str1$$Register, $str2$$Register, 11459 $cnt1$$Register, $cnt2$$Register, 11460 icnt2, $result$$Register, 11461 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU); 11462 } else { 11463 // Small strings are loaded through stack if they cross page boundary. 11464 __ string_indexof($str1$$Register, $str2$$Register, 11465 $cnt1$$Register, $cnt2$$Register, 11466 icnt2, $result$$Register, 11467 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU); 11468 } 11469 %} 11470 ins_pipe( pipe_slow ); 11471 %} 11472 11473 // fast search of substring with known size. 
11474 instruct string_indexof_conUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2, 11475 rbx_RegI result, legRegD tmp_vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr) 11476 %{ 11477 predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL)); 11478 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); 11479 effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); 11480 11481 format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $tmp_vec, $cnt1, $cnt2, $tmp" %} 11482 ins_encode %{ 11483 int icnt2 = (int)$int_cnt2$$constant; 11484 if (icnt2 >= 8) { 11485 // IndexOf for constant substrings with size >= 8 elements 11486 // which don't need to be loaded through stack. 11487 __ string_indexofC8($str1$$Register, $str2$$Register, 11488 $cnt1$$Register, $cnt2$$Register, 11489 icnt2, $result$$Register, 11490 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL); 11491 } else { 11492 // Small strings are loaded through stack if they cross page boundary. 11493 __ string_indexof($str1$$Register, $str2$$Register, 11494 $cnt1$$Register, $cnt2$$Register, 11495 icnt2, $result$$Register, 11496 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL); 11497 } 11498 %} 11499 ins_pipe( pipe_slow ); 11500 %} 11501 11502 instruct string_indexofL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2, 11503 rbx_RegI result, legRegD tmp_vec, rcx_RegI tmp, rFlagsReg cr) 11504 %{ 11505 predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL)); 11506 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); 11507 effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); 11508 11509 format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} 11510 ins_encode %{ 11511 __ string_indexof($str1$$Register, $str2$$Register, 11512 $cnt1$$Register, $cnt2$$Register, 11513 (-1), $result$$Register, 11514 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL); 11515 %} 11516 ins_pipe( pipe_slow ); 11517 %} 11518 11519 instruct string_indexofU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2, 11520 rbx_RegI result, legRegD tmp_vec, rcx_RegI tmp, rFlagsReg cr) 11521 %{ 11522 predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU)); 11523 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); 11524 effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); 11525 11526 format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} 11527 ins_encode %{ 11528 __ string_indexof($str1$$Register, $str2$$Register, 11529 $cnt1$$Register, $cnt2$$Register, 11530 (-1), $result$$Register, 11531 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU); 11532 %} 11533 ins_pipe( pipe_slow ); 11534 %} 11535 11536 instruct string_indexofUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2, 11537 rbx_RegI result, legRegD tmp_vec, rcx_RegI tmp, rFlagsReg cr) 11538 %{ 11539 predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL)); 11540 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); 11541 effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); 11542 11543 format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} 11544 ins_encode %{ 
11545 __ string_indexof($str1$$Register, $str2$$Register, 11546 $cnt1$$Register, $cnt2$$Register, 11547 (-1), $result$$Register, 11548 $tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL); 11549 %} 11550 ins_pipe( pipe_slow ); 11551 %} 11552 11553 instruct string_indexof_char(rdi_RegP str1, rdx_RegI cnt1, rax_RegI ch, 11554 rbx_RegI result, legRegD tmp_vec1, legRegD tmp_vec2, legRegD tmp_vec3, rcx_RegI tmp, rFlagsReg cr) 11555 %{ 11556 predicate(UseSSE42Intrinsics && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U)); 11557 match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); 11558 effect(TEMP tmp_vec1, TEMP tmp_vec2, TEMP tmp_vec3, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP tmp, KILL cr); 11559 format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result // KILL all" %} 11560 ins_encode %{ 11561 __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register, 11562 $tmp_vec1$$XMMRegister, $tmp_vec2$$XMMRegister, $tmp_vec3$$XMMRegister, $tmp$$Register); 11563 %} 11564 ins_pipe( pipe_slow ); 11565 %} 11566 11567 instruct stringL_indexof_char(rdi_RegP str1, rdx_RegI cnt1, rax_RegI ch, 11568 rbx_RegI result, legRegD tmp_vec1, legRegD tmp_vec2, legRegD tmp_vec3, rcx_RegI tmp, rFlagsReg cr) 11569 %{ 11570 predicate(UseSSE42Intrinsics && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L)); 11571 match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); 11572 effect(TEMP tmp_vec1, TEMP tmp_vec2, TEMP tmp_vec3, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP tmp, KILL cr); 11573 format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result // KILL all" %} 11574 ins_encode %{ 11575 __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register, 11576 $tmp_vec1$$XMMRegister, $tmp_vec2$$XMMRegister, $tmp_vec3$$XMMRegister, $tmp$$Register); 11577 %} 11578 ins_pipe( pipe_slow ); 11579 %} 11580 11581 // fast string equals 11582 instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI result, 11583 legRegD tmp1, legRegD tmp2, rbx_RegI tmp3, rFlagsReg cr) 11584 %{ 11585 predicate(!VM_Version::supports_avx512vlbw()); 11586 match(Set result (StrEquals (Binary str1 str2) cnt)); 11587 effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr); 11588 11589 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp1, $tmp2, $tmp3" %} 11590 ins_encode %{ 11591 __ arrays_equals(false, $str1$$Register, $str2$$Register, 11592 $cnt$$Register, $result$$Register, $tmp3$$Register, 11593 $tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */, knoreg); 11594 %} 11595 ins_pipe( pipe_slow ); 11596 %} 11597 11598 instruct string_equals_evex(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI result, 11599 legRegD tmp1, legRegD tmp2, kReg ktmp, rbx_RegI tmp3, rFlagsReg cr) 11600 %{ 11601 predicate(VM_Version::supports_avx512vlbw()); 11602 match(Set result (StrEquals (Binary str1 str2) cnt)); 11603 effect(TEMP tmp1, TEMP tmp2, TEMP ktmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr); 11604 11605 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp1, $tmp2, $tmp3" %} 11606 ins_encode %{ 11607 __ arrays_equals(false, $str1$$Register, $str2$$Register, 11608 $cnt$$Register, $result$$Register, $tmp3$$Register, 11609 $tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */, $ktmp$$KRegister); 11610 %} 11611 ins_pipe( pipe_slow ); 11612 %} 11613 11614 // fast array equals 11615 instruct array_equalsB(rdi_RegP ary1, rsi_RegP ary2, rax_RegI 
result, 11616 legRegD tmp1, legRegD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr) 11617 %{ 11618 predicate(!VM_Version::supports_avx512vlbw() && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); 11619 match(Set result (AryEq ary1 ary2)); 11620 effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr); 11621 11622 format %{ "Array Equals byte[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %} 11623 ins_encode %{ 11624 __ arrays_equals(true, $ary1$$Register, $ary2$$Register, 11625 $tmp3$$Register, $result$$Register, $tmp4$$Register, 11626 $tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */, knoreg); 11627 %} 11628 ins_pipe( pipe_slow ); 11629 %} 11630 11631 instruct array_equalsB_evex(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result, 11632 legRegD tmp1, legRegD tmp2, kReg ktmp, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr) 11633 %{ 11634 predicate(VM_Version::supports_avx512vlbw() && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); 11635 match(Set result (AryEq ary1 ary2)); 11636 effect(TEMP tmp1, TEMP tmp2, TEMP ktmp, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr); 11637 11638 format %{ "Array Equals byte[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %} 11639 ins_encode %{ 11640 __ arrays_equals(true, $ary1$$Register, $ary2$$Register, 11641 $tmp3$$Register, $result$$Register, $tmp4$$Register, 11642 $tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */, $ktmp$$KRegister); 11643 %} 11644 ins_pipe( pipe_slow ); 11645 %} 11646 11647 instruct array_equalsC(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result, 11648 legRegD tmp1, legRegD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr) 11649 %{ 11650 predicate(!VM_Version::supports_avx512vlbw() && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); 11651 match(Set result (AryEq ary1 ary2)); 11652 effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr); 11653 11654 format %{ "Array Equals char[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %} 11655 ins_encode %{ 11656 __ arrays_equals(true, $ary1$$Register, $ary2$$Register, 11657 $tmp3$$Register, $result$$Register, $tmp4$$Register, 11658 $tmp1$$XMMRegister, $tmp2$$XMMRegister, true /* char */, knoreg); 11659 %} 11660 ins_pipe( pipe_slow ); 11661 %} 11662 11663 instruct array_equalsC_evex(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result, 11664 legRegD tmp1, legRegD tmp2, kReg ktmp, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr) 11665 %{ 11666 predicate(VM_Version::supports_avx512vlbw() && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); 11667 match(Set result (AryEq ary1 ary2)); 11668 effect(TEMP tmp1, TEMP tmp2, TEMP ktmp, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr); 11669 11670 format %{ "Array Equals char[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %} 11671 ins_encode %{ 11672 __ arrays_equals(true, $ary1$$Register, $ary2$$Register, 11673 $tmp3$$Register, $result$$Register, $tmp4$$Register, 11674 $tmp1$$XMMRegister, $tmp2$$XMMRegister, true /* char */, $ktmp$$KRegister); 11675 %} 11676 ins_pipe( pipe_slow ); 11677 %} 11678 11679 instruct has_negatives(rsi_RegP ary1, rcx_RegI len, rax_RegI result, 11680 legRegD tmp1, legRegD tmp2, rbx_RegI tmp3, rFlagsReg cr,) 11681 %{ 11682 predicate(!VM_Version::supports_avx512vlbw() || !VM_Version::supports_bmi2()); 11683 match(Set result (HasNegatives ary1 len)); 11684 effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL len, KILL tmp3, KILL cr); 11685 11686 format %{ "has negatives byte[] 
$ary1,$len -> $result // KILL $tmp1, $tmp2, $tmp3" %} 11687 ins_encode %{ 11688 __ has_negatives($ary1$$Register, $len$$Register, 11689 $result$$Register, $tmp3$$Register, 11690 $tmp1$$XMMRegister, $tmp2$$XMMRegister, knoreg, knoreg); 11691 %} 11692 ins_pipe( pipe_slow ); 11693 %} 11694 11695 instruct has_negatives_evex(rsi_RegP ary1, rcx_RegI len, rax_RegI result, 11696 legRegD tmp1, legRegD tmp2, kReg ktmp1, kReg ktmp2, rbx_RegI tmp3, rFlagsReg cr,) 11697 %{ 11698 predicate(VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2()); 11699 match(Set result (HasNegatives ary1 len)); 11700 effect(TEMP tmp1, TEMP tmp2, TEMP ktmp1, TEMP ktmp2, USE_KILL ary1, USE_KILL len, KILL tmp3, KILL cr); 11701 11702 format %{ "has negatives byte[] $ary1,$len -> $result // KILL $tmp1, $tmp2, $tmp3" %} 11703 ins_encode %{ 11704 __ has_negatives($ary1$$Register, $len$$Register, 11705 $result$$Register, $tmp3$$Register, 11706 $tmp1$$XMMRegister, $tmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); 11707 %} 11708 ins_pipe( pipe_slow ); 11709 %} 11710 11711 // fast char[] to byte[] compression 11712 instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, legRegD tmp1, legRegD tmp2, legRegD tmp3, 11713 legRegD tmp4, rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{ 11714 predicate(!VM_Version::supports_avx512vlbw() || !VM_Version::supports_bmi2()); 11715 match(Set result (StrCompressedCopy src (Binary dst len))); 11716 effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, 11717 USE_KILL len, KILL tmp5, KILL cr); 11718 11719 format %{ "String Compress $src,$dst -> $result // KILL RAX, RCX, RDX" %} 11720 ins_encode %{ 11721 __ char_array_compress($src$$Register, $dst$$Register, $len$$Register, 11722 $tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister, 11723 $tmp4$$XMMRegister, $tmp5$$Register, $result$$Register, 11724 knoreg, knoreg); 11725 %} 11726 ins_pipe( pipe_slow ); 11727 %} 11728 11729 instruct string_compress_evex(rsi_RegP src, rdi_RegP dst, rdx_RegI len, legRegD tmp1, legRegD tmp2, legRegD tmp3, 11730 legRegD tmp4, kReg ktmp1, kReg ktmp2, rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{ 11731 predicate(VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2()); 11732 match(Set result (StrCompressedCopy src (Binary dst len))); 11733 effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP ktmp1, TEMP ktmp2, USE_KILL src, USE_KILL dst, 11734 USE_KILL len, KILL tmp5, KILL cr); 11735 11736 format %{ "String Compress $src,$dst -> $result // KILL RAX, RCX, RDX" %} 11737 ins_encode %{ 11738 __ char_array_compress($src$$Register, $dst$$Register, $len$$Register, 11739 $tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister, 11740 $tmp4$$XMMRegister, $tmp5$$Register, $result$$Register, 11741 $ktmp1$$KRegister, $ktmp2$$KRegister); 11742 %} 11743 ins_pipe( pipe_slow ); 11744 %} 11745 // fast byte[] to char[] inflation 11746 instruct string_inflate(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len, 11747 legRegD tmp1, rcx_RegI tmp2, rFlagsReg cr) %{ 11748 predicate(!VM_Version::supports_avx512vlbw() || !VM_Version::supports_bmi2()); 11749 match(Set dummy (StrInflatedCopy src (Binary dst len))); 11750 effect(TEMP tmp1, TEMP tmp2, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr); 11751 11752 format %{ "String Inflate $src,$dst // KILL $tmp1, $tmp2" %} 11753 ins_encode %{ 11754 __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register, 11755 $tmp1$$XMMRegister, $tmp2$$Register, knoreg); 11756 %} 11757 ins_pipe( pipe_slow ); 11758 %} 
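// Note the pattern shared by the string/array intrinsics in this section: the
// plain rule is predicated on AVX-512 VL/BW (and, where used, BMI2) being
// unavailable and passes knoreg, while the matching _evex rule requires those
// features, adds kReg temporaries and passes real opmask registers, so exactly
// one of the pair can match on any given CPU.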
11759 11760 instruct string_inflate_evex(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len, 11761 legRegD tmp1, kReg ktmp, rcx_RegI tmp2, rFlagsReg cr) %{ 11762 predicate(VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2()); 11763 match(Set dummy (StrInflatedCopy src (Binary dst len))); 11764 effect(TEMP tmp1, TEMP tmp2, TEMP ktmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr); 11765 11766 format %{ "String Inflate $src,$dst // KILL $tmp1, $tmp2" %} 11767 ins_encode %{ 11768 __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register, 11769 $tmp1$$XMMRegister, $tmp2$$Register, $ktmp$$KRegister); 11770 %} 11771 ins_pipe( pipe_slow ); 11772 %} 11773 11774 // encode char[] to byte[] in ISO_8859_1 11775 instruct encode_iso_array(rsi_RegP src, rdi_RegP dst, rdx_RegI len, 11776 legRegD tmp1, legRegD tmp2, legRegD tmp3, legRegD tmp4, 11777 rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{ 11778 predicate(!((EncodeISOArrayNode*)n)->is_ascii()); 11779 match(Set result (EncodeISOArray src (Binary dst len))); 11780 effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr); 11781 11782 format %{ "Encode iso array $src,$dst,$len -> $result // KILL RCX, RDX, $tmp1, $tmp2, $tmp3, $tmp4, RSI, RDI " %} 11783 ins_encode %{ 11784 __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, 11785 $tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister, 11786 $tmp4$$XMMRegister, $tmp5$$Register, $result$$Register, false); 11787 %} 11788 ins_pipe( pipe_slow ); 11789 %} 11790 11791 // encode char[] to byte[] in ASCII 11792 instruct encode_ascii_array(rsi_RegP src, rdi_RegP dst, rdx_RegI len, 11793 legRegD tmp1, legRegD tmp2, legRegD tmp3, legRegD tmp4, 11794 rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{ 11795 predicate(((EncodeISOArrayNode*)n)->is_ascii()); 11796 match(Set result (EncodeISOArray src (Binary dst len))); 11797 effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr); 11798 11799 format %{ "Encode ascii array $src,$dst,$len -> $result // KILL RCX, RDX, $tmp1, $tmp2, $tmp3, $tmp4, RSI, RDI " %} 11800 ins_encode %{ 11801 __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, 11802 $tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister, 11803 $tmp4$$XMMRegister, $tmp5$$Register, $result$$Register, true); 11804 %} 11805 ins_pipe( pipe_slow ); 11806 %} 11807 11808 //----------Overflow Math Instructions----------------------------------------- 11809 11810 instruct overflowAddI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2) 11811 %{ 11812 match(Set cr (OverflowAddI op1 op2)); 11813 effect(DEF cr, USE_KILL op1, USE op2); 11814 11815 format %{ "addl $op1, $op2\t# overflow check int" %} 11816 11817 ins_encode %{ 11818 __ addl($op1$$Register, $op2$$Register); 11819 %} 11820 ins_pipe(ialu_reg_reg); 11821 %} 11822 11823 instruct overflowAddI_rReg_imm(rFlagsReg cr, rax_RegI op1, immI op2) 11824 %{ 11825 match(Set cr (OverflowAddI op1 op2)); 11826 effect(DEF cr, USE_KILL op1, USE op2); 11827 11828 format %{ "addl $op1, $op2\t# overflow check int" %} 11829 11830 ins_encode %{ 11831 __ addl($op1$$Register, $op2$$constant); 11832 %} 11833 ins_pipe(ialu_reg_reg); 11834 %} 11835 11836 instruct overflowAddL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2) 11837 %{ 11838 match(Set cr (OverflowAddL op1 op2)); 11839 effect(DEF cr, USE_KILL op1, USE op2); 11840 11841 format %{ "addq $op1, $op2\t# overflow check long" %} 11842 ins_encode %{ 11843 __ 
addq($op1$$Register, $op2$$Register); 11844 %} 11845 ins_pipe(ialu_reg_reg); 11846 %} 11847 11848 instruct overflowAddL_rReg_imm(rFlagsReg cr, rax_RegL op1, immL32 op2) 11849 %{ 11850 match(Set cr (OverflowAddL op1 op2)); 11851 effect(DEF cr, USE_KILL op1, USE op2); 11852 11853 format %{ "addq $op1, $op2\t# overflow check long" %} 11854 ins_encode %{ 11855 __ addq($op1$$Register, $op2$$constant); 11856 %} 11857 ins_pipe(ialu_reg_reg); 11858 %} 11859 11860 instruct overflowSubI_rReg(rFlagsReg cr, rRegI op1, rRegI op2) 11861 %{ 11862 match(Set cr (OverflowSubI op1 op2)); 11863 11864 format %{ "cmpl $op1, $op2\t# overflow check int" %} 11865 ins_encode %{ 11866 __ cmpl($op1$$Register, $op2$$Register); 11867 %} 11868 ins_pipe(ialu_reg_reg); 11869 %} 11870 11871 instruct overflowSubI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2) 11872 %{ 11873 match(Set cr (OverflowSubI op1 op2)); 11874 11875 format %{ "cmpl $op1, $op2\t# overflow check int" %} 11876 ins_encode %{ 11877 __ cmpl($op1$$Register, $op2$$constant); 11878 %} 11879 ins_pipe(ialu_reg_reg); 11880 %} 11881 11882 instruct overflowSubL_rReg(rFlagsReg cr, rRegL op1, rRegL op2) 11883 %{ 11884 match(Set cr (OverflowSubL op1 op2)); 11885 11886 format %{ "cmpq $op1, $op2\t# overflow check long" %} 11887 ins_encode %{ 11888 __ cmpq($op1$$Register, $op2$$Register); 11889 %} 11890 ins_pipe(ialu_reg_reg); 11891 %} 11892 11893 instruct overflowSubL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2) 11894 %{ 11895 match(Set cr (OverflowSubL op1 op2)); 11896 11897 format %{ "cmpq $op1, $op2\t# overflow check long" %} 11898 ins_encode %{ 11899 __ cmpq($op1$$Register, $op2$$constant); 11900 %} 11901 ins_pipe(ialu_reg_reg); 11902 %} 11903 11904 instruct overflowNegI_rReg(rFlagsReg cr, immI_0 zero, rax_RegI op2) 11905 %{ 11906 match(Set cr (OverflowSubI zero op2)); 11907 effect(DEF cr, USE_KILL op2); 11908 11909 format %{ "negl $op2\t# overflow check int" %} 11910 ins_encode %{ 11911 __ negl($op2$$Register); 11912 %} 11913 ins_pipe(ialu_reg_reg); 11914 %} 11915 11916 instruct overflowNegL_rReg(rFlagsReg cr, immL0 zero, rax_RegL op2) 11917 %{ 11918 match(Set cr (OverflowSubL zero op2)); 11919 effect(DEF cr, USE_KILL op2); 11920 11921 format %{ "negq $op2\t# overflow check long" %} 11922 ins_encode %{ 11923 __ negq($op2$$Register); 11924 %} 11925 ins_pipe(ialu_reg_reg); 11926 %} 11927 11928 instruct overflowMulI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2) 11929 %{ 11930 match(Set cr (OverflowMulI op1 op2)); 11931 effect(DEF cr, USE_KILL op1, USE op2); 11932 11933 format %{ "imull $op1, $op2\t# overflow check int" %} 11934 ins_encode %{ 11935 __ imull($op1$$Register, $op2$$Register); 11936 %} 11937 ins_pipe(ialu_reg_reg_alu0); 11938 %} 11939 11940 instruct overflowMulI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2, rRegI tmp) 11941 %{ 11942 match(Set cr (OverflowMulI op1 op2)); 11943 effect(DEF cr, TEMP tmp, USE op1, USE op2); 11944 11945 format %{ "imull $tmp, $op1, $op2\t# overflow check int" %} 11946 ins_encode %{ 11947 __ imull($tmp$$Register, $op1$$Register, $op2$$constant); 11948 %} 11949 ins_pipe(ialu_reg_reg_alu0); 11950 %} 11951 11952 instruct overflowMulL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2) 11953 %{ 11954 match(Set cr (OverflowMulL op1 op2)); 11955 effect(DEF cr, USE_KILL op1, USE op2); 11956 11957 format %{ "imulq $op1, $op2\t# overflow check long" %} 11958 ins_encode %{ 11959 __ imulq($op1$$Register, $op2$$Register); 11960 %} 11961 ins_pipe(ialu_reg_reg_alu0); 11962 %} 11963 11964 instruct overflowMulL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2, 
rRegL tmp) 11965 %{ 11966 match(Set cr (OverflowMulL op1 op2)); 11967 effect(DEF cr, TEMP tmp, USE op1, USE op2); 11968 11969 format %{ "imulq $tmp, $op1, $op2\t# overflow check long" %} 11970 ins_encode %{ 11971 __ imulq($tmp$$Register, $op1$$Register, $op2$$constant); 11972 %} 11973 ins_pipe(ialu_reg_reg_alu0); 11974 %} 11975 11976 11977 //----------Control Flow Instructions------------------------------------------ 11978 // Signed compare Instructions 11979 11980 // XXX more variants!! 11981 instruct compI_rReg(rFlagsReg cr, rRegI op1, rRegI op2) 11982 %{ 11983 match(Set cr (CmpI op1 op2)); 11984 effect(DEF cr, USE op1, USE op2); 11985 11986 format %{ "cmpl $op1, $op2" %} 11987 ins_encode %{ 11988 __ cmpl($op1$$Register, $op2$$Register); 11989 %} 11990 ins_pipe(ialu_cr_reg_reg); 11991 %} 11992 11993 instruct compI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2) 11994 %{ 11995 match(Set cr (CmpI op1 op2)); 11996 11997 format %{ "cmpl $op1, $op2" %} 11998 ins_encode %{ 11999 __ cmpl($op1$$Register, $op2$$constant); 12000 %} 12001 ins_pipe(ialu_cr_reg_imm); 12002 %} 12003 12004 instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2) 12005 %{ 12006 match(Set cr (CmpI op1 (LoadI op2))); 12007 12008 ins_cost(500); // XXX 12009 format %{ "cmpl $op1, $op2" %} 12010 ins_encode %{ 12011 __ cmpl($op1$$Register, $op2$$Address); 12012 %} 12013 ins_pipe(ialu_cr_reg_mem); 12014 %} 12015 12016 instruct testI_reg(rFlagsReg cr, rRegI src, immI_0 zero) 12017 %{ 12018 match(Set cr (CmpI src zero)); 12019 12020 format %{ "testl $src, $src" %} 12021 ins_encode %{ 12022 __ testl($src$$Register, $src$$Register); 12023 %} 12024 ins_pipe(ialu_cr_reg_imm); 12025 %} 12026 12027 instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI_0 zero) 12028 %{ 12029 match(Set cr (CmpI (AndI src con) zero)); 12030 12031 format %{ "testl $src, $con" %} 12032 ins_encode %{ 12033 __ testl($src$$Register, $con$$constant); 12034 %} 12035 ins_pipe(ialu_cr_reg_imm); 12036 %} 12037 12038 instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI_0 zero) 12039 %{ 12040 match(Set cr (CmpI (AndI src (LoadI mem)) zero)); 12041 12042 format %{ "testl $src, $mem" %} 12043 ins_encode %{ 12044 __ testl($src$$Register, $mem$$Address); 12045 %} 12046 ins_pipe(ialu_cr_reg_mem); 12047 %} 12048 12049 // Unsigned compare Instructions; really, same as signed except they 12050 // produce an rFlagsRegU instead of rFlagsReg. 
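// Before the unsigned forms, a rough Java-level illustration of the kinds of
// conditions the signed compare/test instructs above cover (illustrative
// only -- the matcher's actual choice depends on the operand shapes it sees):
//
//   if (x == y)          { ... }   // CmpI reg,reg          -> cmpl   (compI_rReg)
//   if (x > 7)           { ... }   // CmpI reg,imm          -> cmpl   (compI_rReg_imm)
//   if (x != 0)          { ... }   // CmpI reg,0            -> testl  (testI_reg)
//   if ((x & 0x10) != 0) { ... }   // CmpI (AndI reg imm),0 -> testl  (testI_reg_imm)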
12051 instruct compU_rReg(rFlagsRegU cr, rRegI op1, rRegI op2) 12052 %{ 12053 match(Set cr (CmpU op1 op2)); 12054 12055 format %{ "cmpl $op1, $op2\t# unsigned" %} 12056 ins_encode %{ 12057 __ cmpl($op1$$Register, $op2$$Register); 12058 %} 12059 ins_pipe(ialu_cr_reg_reg); 12060 %} 12061 12062 instruct compU_rReg_imm(rFlagsRegU cr, rRegI op1, immI op2) 12063 %{ 12064 match(Set cr (CmpU op1 op2)); 12065 12066 format %{ "cmpl $op1, $op2\t# unsigned" %} 12067 ins_encode %{ 12068 __ cmpl($op1$$Register, $op2$$constant); 12069 %} 12070 ins_pipe(ialu_cr_reg_imm); 12071 %} 12072 12073 instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2) 12074 %{ 12075 match(Set cr (CmpU op1 (LoadI op2))); 12076 12077 ins_cost(500); // XXX 12078 format %{ "cmpl $op1, $op2\t# unsigned" %} 12079 ins_encode %{ 12080 __ cmpl($op1$$Register, $op2$$Address); 12081 %} 12082 ins_pipe(ialu_cr_reg_mem); 12083 %} 12084 12085 // // // Cisc-spilled version of cmpU_rReg 12086 // //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2) 12087 // //%{ 12088 // // match(Set cr (CmpU (LoadI op1) op2)); 12089 // // 12090 // // format %{ "CMPu $op1,$op2" %} 12091 // // ins_cost(500); 12092 // // opcode(0x39); /* Opcode 39 /r */ 12093 // // ins_encode( OpcP, reg_mem( op1, op2) ); 12094 // //%} 12095 12096 instruct testU_reg(rFlagsRegU cr, rRegI src, immI_0 zero) 12097 %{ 12098 match(Set cr (CmpU src zero)); 12099 12100 format %{ "testl $src, $src\t# unsigned" %} 12101 ins_encode %{ 12102 __ testl($src$$Register, $src$$Register); 12103 %} 12104 ins_pipe(ialu_cr_reg_imm); 12105 %} 12106 12107 instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2) 12108 %{ 12109 match(Set cr (CmpP op1 op2)); 12110 12111 format %{ "cmpq $op1, $op2\t# ptr" %} 12112 ins_encode %{ 12113 __ cmpq($op1$$Register, $op2$$Register); 12114 %} 12115 ins_pipe(ialu_cr_reg_reg); 12116 %} 12117 12118 instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2) 12119 %{ 12120 match(Set cr (CmpP op1 (LoadP op2))); 12121 predicate(n->in(2)->as_Load()->barrier_data() == 0); 12122 12123 ins_cost(500); // XXX 12124 format %{ "cmpq $op1, $op2\t# ptr" %} 12125 ins_encode %{ 12126 __ cmpq($op1$$Register, $op2$$Address); 12127 %} 12128 ins_pipe(ialu_cr_reg_mem); 12129 %} 12130 12131 // // // Cisc-spilled version of cmpP_rReg 12132 // //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2) 12133 // //%{ 12134 // // match(Set cr (CmpP (LoadP op1) op2)); 12135 // // 12136 // // format %{ "CMPu $op1,$op2" %} 12137 // // ins_cost(500); 12138 // // opcode(0x39); /* Opcode 39 /r */ 12139 // // ins_encode( OpcP, reg_mem( op1, op2) ); 12140 // //%} 12141 12142 // XXX this is generalized by compP_rReg_mem??? 12143 // Compare raw pointer (used in out-of-heap check). 12144 // Only works because non-oop pointers must be raw pointers 12145 // and raw pointers have no anti-dependencies. 12146 instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2) 12147 %{ 12148 predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none && 12149 n->in(2)->as_Load()->barrier_data() == 0); 12150 match(Set cr (CmpP op1 (LoadP op2))); 12151 12152 format %{ "cmpq $op1, $op2\t# raw ptr" %} 12153 ins_encode %{ 12154 __ cmpq($op1$$Register, $op2$$Address); 12155 %} 12156 ins_pipe(ialu_cr_reg_mem); 12157 %} 12158 12159 // This will generate a signed flags result. This should be OK since 12160 // any compare to a zero should be eq/neq. 
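// For example (rough sketch), a Java null check such as
//
//   if (p == null) { throw new NullPointerException(); }
//
// becomes (CmpP p NULL) feeding an eq/ne branch when it is not turned into an
// implicit (trap-based) null check, so only the zero/non-zero outcome matters
// and a signed flags result is sufficient here.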
12161 instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero) 12162 %{ 12163 match(Set cr (CmpP src zero)); 12164 12165 format %{ "testq $src, $src\t# ptr" %} 12166 ins_encode %{ 12167 __ testq($src$$Register, $src$$Register); 12168 %} 12169 ins_pipe(ialu_cr_reg_imm); 12170 %} 12171 12172 // This will generate a signed flags result. This should be OK since 12173 // any compare to a zero should be eq/neq. 12174 instruct testP_mem(rFlagsReg cr, memory op, immP0 zero) 12175 %{ 12176 predicate((!UseCompressedOops || (CompressedOops::base() != NULL)) && 12177 n->in(1)->as_Load()->barrier_data() == 0); 12178 match(Set cr (CmpP (LoadP op) zero)); 12179 12180 ins_cost(500); // XXX 12181 format %{ "testq $op, 0xffffffffffffffff\t# ptr" %} 12182 ins_encode %{ 12183 __ testq($op$$Address, 0xFFFFFFFF); 12184 %} 12185 ins_pipe(ialu_cr_reg_imm); 12186 %} 12187 12188 instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero) 12189 %{ 12190 predicate(UseCompressedOops && (CompressedOops::base() == NULL) && 12191 n->in(1)->as_Load()->barrier_data() == 0); 12192 match(Set cr (CmpP (LoadP mem) zero)); 12193 12194 format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %} 12195 ins_encode %{ 12196 __ cmpq(r12, $mem$$Address); 12197 %} 12198 ins_pipe(ialu_cr_reg_mem); 12199 %} 12200 12201 instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2) 12202 %{ 12203 match(Set cr (CmpN op1 op2)); 12204 12205 format %{ "cmpl $op1, $op2\t# compressed ptr" %} 12206 ins_encode %{ __ cmpl($op1$$Register, $op2$$Register); %} 12207 ins_pipe(ialu_cr_reg_reg); 12208 %} 12209 12210 instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem) 12211 %{ 12212 match(Set cr (CmpN src (LoadN mem))); 12213 12214 format %{ "cmpl $src, $mem\t# compressed ptr" %} 12215 ins_encode %{ 12216 __ cmpl($src$$Register, $mem$$Address); 12217 %} 12218 ins_pipe(ialu_cr_reg_mem); 12219 %} 12220 12221 instruct compN_rReg_imm(rFlagsRegU cr, rRegN op1, immN op2) %{ 12222 match(Set cr (CmpN op1 op2)); 12223 12224 format %{ "cmpl $op1, $op2\t# compressed ptr" %} 12225 ins_encode %{ 12226 __ cmp_narrow_oop($op1$$Register, (jobject)$op2$$constant); 12227 %} 12228 ins_pipe(ialu_cr_reg_imm); 12229 %} 12230 12231 instruct compN_mem_imm(rFlagsRegU cr, memory mem, immN src) 12232 %{ 12233 match(Set cr (CmpN src (LoadN mem))); 12234 12235 format %{ "cmpl $mem, $src\t# compressed ptr" %} 12236 ins_encode %{ 12237 __ cmp_narrow_oop($mem$$Address, (jobject)$src$$constant); 12238 %} 12239 ins_pipe(ialu_cr_reg_mem); 12240 %} 12241 12242 instruct compN_rReg_imm_klass(rFlagsRegU cr, rRegN op1, immNKlass op2) %{ 12243 match(Set cr (CmpN op1 op2)); 12244 12245 format %{ "cmpl $op1, $op2\t# compressed klass ptr" %} 12246 ins_encode %{ 12247 __ cmp_narrow_klass($op1$$Register, (Klass*)$op2$$constant); 12248 %} 12249 ins_pipe(ialu_cr_reg_imm); 12250 %} 12251 12252 instruct compN_mem_imm_klass(rFlagsRegU cr, memory mem, immNKlass src) 12253 %{ 12254 predicate(!UseCompactObjectHeaders); 12255 match(Set cr (CmpN src (LoadNKlass mem))); 12256 12257 format %{ "cmpl $mem, $src\t# compressed klass ptr" %} 12258 ins_encode %{ 12259 __ cmp_narrow_klass($mem$$Address, (Klass*)$src$$constant); 12260 %} 12261 ins_pipe(ialu_cr_reg_mem); 12262 %} 12263 12264 instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{ 12265 match(Set cr (CmpN src zero)); 12266 12267 format %{ "testl $src, $src\t# compressed ptr" %} 12268 ins_encode %{ __ testl($src$$Register, $src$$Register); %} 12269 ins_pipe(ialu_cr_reg_imm); 12270 %} 12271 12272 instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero) 
12273 %{ 12274 predicate(CompressedOops::base() != NULL); 12275 match(Set cr (CmpN (LoadN mem) zero)); 12276 12277 ins_cost(500); // XXX 12278 format %{ "testl $mem, 0xffffffff\t# compressed ptr" %} 12279 ins_encode %{ 12280 __ cmpl($mem$$Address, (int)0xFFFFFFFF); 12281 %} 12282 ins_pipe(ialu_cr_reg_mem); 12283 %} 12284 12285 instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero) 12286 %{ 12287 predicate(CompressedOops::base() == NULL); 12288 match(Set cr (CmpN (LoadN mem) zero)); 12289 12290 format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %} 12291 ins_encode %{ 12292 __ cmpl(r12, $mem$$Address); 12293 %} 12294 ins_pipe(ialu_cr_reg_mem); 12295 %} 12296 12297 // Yanked all unsigned pointer compare operations. 12298 // Pointer compares are done with CmpP which is already unsigned. 12299 12300 instruct compL_rReg(rFlagsReg cr, rRegL op1, rRegL op2) 12301 %{ 12302 match(Set cr (CmpL op1 op2)); 12303 12304 format %{ "cmpq $op1, $op2" %} 12305 ins_encode %{ 12306 __ cmpq($op1$$Register, $op2$$Register); 12307 %} 12308 ins_pipe(ialu_cr_reg_reg); 12309 %} 12310 12311 instruct compL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2) 12312 %{ 12313 match(Set cr (CmpL op1 op2)); 12314 12315 format %{ "cmpq $op1, $op2" %} 12316 ins_encode %{ 12317 __ cmpq($op1$$Register, $op2$$constant); 12318 %} 12319 ins_pipe(ialu_cr_reg_imm); 12320 %} 12321 12322 instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2) 12323 %{ 12324 match(Set cr (CmpL op1 (LoadL op2))); 12325 12326 format %{ "cmpq $op1, $op2" %} 12327 ins_encode %{ 12328 __ cmpq($op1$$Register, $op2$$Address); 12329 %} 12330 ins_pipe(ialu_cr_reg_mem); 12331 %} 12332 12333 instruct testL_reg(rFlagsReg cr, rRegL src, immL0 zero) 12334 %{ 12335 match(Set cr (CmpL src zero)); 12336 12337 format %{ "testq $src, $src" %} 12338 ins_encode %{ 12339 __ testq($src$$Register, $src$$Register); 12340 %} 12341 ins_pipe(ialu_cr_reg_imm); 12342 %} 12343 12344 instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero) 12345 %{ 12346 match(Set cr (CmpL (AndL src con) zero)); 12347 12348 format %{ "testq $src, $con\t# long" %} 12349 ins_encode %{ 12350 __ testq($src$$Register, $con$$constant); 12351 %} 12352 ins_pipe(ialu_cr_reg_imm); 12353 %} 12354 12355 instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero) 12356 %{ 12357 match(Set cr (CmpL (AndL src (LoadL mem)) zero)); 12358 12359 format %{ "testq $src, $mem" %} 12360 ins_encode %{ 12361 __ testq($src$$Register, $mem$$Address); 12362 %} 12363 ins_pipe(ialu_cr_reg_mem); 12364 %} 12365 12366 instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero) 12367 %{ 12368 match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero)); 12369 12370 format %{ "testq $src, $mem" %} 12371 ins_encode %{ 12372 __ testq($src$$Register, $mem$$Address); 12373 %} 12374 ins_pipe(ialu_cr_reg_mem); 12375 %} 12376 12377 // Manifest a CmpL result in an integer register. Very painful. 12378 // This is the test to avoid. 
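// A typical (illustrative) source of CmpL3 is the lcmp bytecode, e.g. from a
// hand-written long three-way compare:
//
//   static int cmp(long a, long b) {
//     return (a < b) ? -1 : ((a == b) ? 0 : 1);   // javac emits lcmp for the long compares
//   }
//
// When the integer result is actually consumed (not just branched on), the
// instruct below has to materialize -1/0/1 in a register.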
12379 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags) 12380 %{ 12381 match(Set dst (CmpL3 src1 src2)); 12382 effect(KILL flags); 12383 12384 ins_cost(275); // XXX 12385 format %{ "cmpq $src1, $src2\t# CmpL3\n\t" 12386 "movl $dst, -1\n\t" 12387 "jl,s done\n\t" 12388 "setne $dst\n\t" 12389 "movzbl $dst, $dst\n\t" 12390 "done:" %} 12391 ins_encode %{ 12392 Label done; 12393 __ cmpq($src1$$Register, $src2$$Register); 12394 __ movl($dst$$Register, -1); 12395 __ jccb(Assembler::less, done); 12396 __ setne($dst$$Register); 12397 __ movzbl($dst$$Register, $dst$$Register); 12398 __ bind(done); 12399 %} 12400 ins_pipe(pipe_slow); 12401 %} 12402 12403 // Unsigned long compare Instructions; really, same as signed long except they 12404 // produce an rFlagsRegU instead of rFlagsReg. 12405 instruct compUL_rReg(rFlagsRegU cr, rRegL op1, rRegL op2) 12406 %{ 12407 match(Set cr (CmpUL op1 op2)); 12408 12409 format %{ "cmpq $op1, $op2\t# unsigned" %} 12410 ins_encode %{ 12411 __ cmpq($op1$$Register, $op2$$Register); 12412 %} 12413 ins_pipe(ialu_cr_reg_reg); 12414 %} 12415 12416 instruct compUL_rReg_imm(rFlagsRegU cr, rRegL op1, immL32 op2) 12417 %{ 12418 match(Set cr (CmpUL op1 op2)); 12419 12420 format %{ "cmpq $op1, $op2\t# unsigned" %} 12421 ins_encode %{ 12422 __ cmpq($op1$$Register, $op2$$constant); 12423 %} 12424 ins_pipe(ialu_cr_reg_imm); 12425 %} 12426 12427 instruct compUL_rReg_mem(rFlagsRegU cr, rRegL op1, memory op2) 12428 %{ 12429 match(Set cr (CmpUL op1 (LoadL op2))); 12430 12431 format %{ "cmpq $op1, $op2\t# unsigned" %} 12432 ins_encode %{ 12433 __ cmpq($op1$$Register, $op2$$Address); 12434 %} 12435 ins_pipe(ialu_cr_reg_mem); 12436 %} 12437 12438 instruct testUL_reg(rFlagsRegU cr, rRegL src, immL0 zero) 12439 %{ 12440 match(Set cr (CmpUL src zero)); 12441 12442 format %{ "testq $src, $src\t# unsigned" %} 12443 ins_encode %{ 12444 __ testq($src$$Register, $src$$Register); 12445 %} 12446 ins_pipe(ialu_cr_reg_imm); 12447 %} 12448 12449 instruct compB_mem_imm(rFlagsReg cr, memory mem, immI8 imm) 12450 %{ 12451 match(Set cr (CmpI (LoadB mem) imm)); 12452 12453 ins_cost(125); 12454 format %{ "cmpb $mem, $imm" %} 12455 ins_encode %{ __ cmpb($mem$$Address, $imm$$constant); %} 12456 ins_pipe(ialu_cr_reg_mem); 12457 %} 12458 12459 instruct testUB_mem_imm(rFlagsReg cr, memory mem, immU7 imm, immI_0 zero) 12460 %{ 12461 match(Set cr (CmpI (AndI (LoadUB mem) imm) zero)); 12462 12463 ins_cost(125); 12464 format %{ "testb $mem, $imm\t# ubyte" %} 12465 ins_encode %{ __ testb($mem$$Address, $imm$$constant); %} 12466 ins_pipe(ialu_cr_reg_mem); 12467 %} 12468 12469 instruct testB_mem_imm(rFlagsReg cr, memory mem, immI8 imm, immI_0 zero) 12470 %{ 12471 match(Set cr (CmpI (AndI (LoadB mem) imm) zero)); 12472 12473 ins_cost(125); 12474 format %{ "testb $mem, $imm\t# byte" %} 12475 ins_encode %{ __ testb($mem$$Address, $imm$$constant); %} 12476 ins_pipe(ialu_cr_reg_mem); 12477 %} 12478 12479 //----------Max and Min-------------------------------------------------------- 12480 // Min Instructions 12481 12482 instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr) 12483 %{ 12484 effect(USE_DEF dst, USE src, USE cr); 12485 12486 format %{ "cmovlgt $dst, $src\t# min" %} 12487 ins_encode %{ 12488 __ cmovl(Assembler::greater, $dst$$Register, $src$$Register); 12489 %} 12490 ins_pipe(pipe_cmov_reg); 12491 %} 12492 12493 12494 instruct minI_rReg(rRegI dst, rRegI src) 12495 %{ 12496 match(Set dst (MinI dst src)); 12497 12498 ins_cost(200); 12499 expand %{ 12500 rFlagsReg cr; 12501 compI_rReg(cr, 
        dst, src);
    cmovI_reg_g(dst, src, cr);
  %}
%}

instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE src, USE cr);

  format %{ "cmovllt $dst, $src\t# max" %}
  ins_encode %{
    __ cmovl(Assembler::less, $dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_cmov_reg);
%}


instruct maxI_rReg(rRegI dst, rRegI src)
%{
  match(Set dst (MaxI dst src));

  ins_cost(200);
  expand %{
    rFlagsReg cr;
    compI_rReg(cr, dst, src);
    cmovI_reg_l(dst, src, cr);
  %}
%}

// ============================================================================
// Branch Instructions

// Jump Direct - Label defines a relative address from JMP+1
instruct jmpDir(label labl)
%{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "jmp $labl" %}
  size(5);
  ins_encode %{
    Label* L = $labl$$label;
    __ jmp(*L, false); // Always long jump
  %}
  ins_pipe(pipe_jmp);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpCon(cmpOp cop, rFlagsReg cr, label labl)
%{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop $labl" %}
  size(6);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
  %}
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
%{
  predicate(!n->has_vector_mask_set());
  match(CountedLoopEnd cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop $labl\t# loop end" %}
  size(6);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
  %}
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  predicate(!n->has_vector_mask_set());
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,u $labl\t# loop end" %}
  size(6);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
  %}
  ins_pipe(pipe_jcc);
%}

instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  predicate(!n->has_vector_mask_set());
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ "j$cop,u $labl\t# loop end" %}
  size(6);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
  %}
  ins_pipe(pipe_jcc);
%}

// mask version
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Bounded mask operand used in following pattern is needed for
// post-loop multiversioning.
instruct jmpLoopEnd_and_restoreMask(cmpOp cop, kReg_K1 ktmp, rFlagsReg cr, label labl)
%{
  predicate(PostLoopMultiversioning && n->has_vector_mask_set());
  match(CountedLoopEnd cop cr);
  effect(USE labl, TEMP ktmp);

  ins_cost(400);
  format %{ "j$cop $labl\t# loop end\n\t"
            "restorevectmask \t# vector mask restore for loops" %}
  size(10);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
    __ restorevectmask($ktmp$$KRegister);
  %}
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Bounded mask operand used in following pattern is needed for
// post-loop multiversioning.
instruct jmpLoopEndU_and_restoreMask(cmpOpU cop, kReg_K1 ktmp, rFlagsRegU cmp, label labl) %{
  predicate(PostLoopMultiversioning && n->has_vector_mask_set());
  match(CountedLoopEnd cop cmp);
  effect(USE labl, TEMP ktmp);

  ins_cost(400);
  format %{ "j$cop,u $labl\t# loop end\n\t"
            "restorevectmask \t# vector mask restore for loops" %}
  size(10);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
    __ restorevectmask($ktmp$$KRegister);
  %}
  ins_pipe(pipe_jcc);
%}

// Bounded mask operand used in following pattern is needed for
// post-loop multiversioning.
instruct jmpLoopEndUCF_and_restoreMask(cmpOpUCF cop, kReg_K1 ktmp, rFlagsRegUCF cmp, label labl) %{
  predicate(PostLoopMultiversioning && n->has_vector_mask_set());
  match(CountedLoopEnd cop cmp);
  effect(USE labl, TEMP ktmp);

  ins_cost(300);
  format %{ "j$cop,u $labl\t# loop end\n\t"
            "restorevectmask \t# vector mask restore for loops" %}
  size(10);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
    __ restorevectmask($ktmp$$KRegister);
  %}
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - using unsigned comparison
instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,u $labl" %}
  size(6);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
  %}
  ins_pipe(pipe_jcc);
%}

instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ "j$cop,u $labl" %}
  size(6);
  ins_encode %{
    Label* L = $labl$$label;
    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
  %}
  ins_pipe(pipe_jcc);
%}

instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ $$template
    if ($cop$$cmpcode == Assembler::notEqual) {
      $$emit$$"jp,u $labl\n\t"
      $$emit$$"j$cop,u $labl"
    } else {
      $$emit$$"jp,u done\n\t"
      $$emit$$"j$cop,u $labl\n\t"
      $$emit$$"done:"
    }
  %}
  ins_encode %{
    Label* l = $labl$$label;
    if ($cop$$cmpcode == Assembler::notEqual) {
      __ jcc(Assembler::parity, *l, false);
      __
jcc(Assembler::notEqual, *l, false); 12723 } else if ($cop$$cmpcode == Assembler::equal) { 12724 Label done; 12725 __ jccb(Assembler::parity, done); 12726 __ jcc(Assembler::equal, *l, false); 12727 __ bind(done); 12728 } else { 12729 ShouldNotReachHere(); 12730 } 12731 %} 12732 ins_pipe(pipe_jcc); 12733 %} 12734 12735 // ============================================================================ 12736 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary 12737 // superklass array for an instance of the superklass. Set a hidden 12738 // internal cache on a hit (cache is checked with exposed code in 12739 // gen_subtype_check()). Return NZ for a miss or zero for a hit. The 12740 // encoding ALSO sets flags. 12741 12742 instruct partialSubtypeCheck(rdi_RegP result, 12743 rsi_RegP sub, rax_RegP super, rcx_RegI rcx, 12744 rFlagsReg cr) 12745 %{ 12746 match(Set result (PartialSubtypeCheck sub super)); 12747 effect(KILL rcx, KILL cr); 12748 12749 ins_cost(1100); // slightly larger than the next version 12750 format %{ "movq rdi, [$sub + in_bytes(Klass::secondary_supers_offset())]\n\t" 12751 "movl rcx, [rdi + Array<Klass*>::length_offset_in_bytes()]\t# length to scan\n\t" 12752 "addq rdi, Array<Klass*>::base_offset_in_bytes()\t# Skip to start of data; set NZ in case count is zero\n\t" 12753 "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t" 12754 "jne,s miss\t\t# Missed: rdi not-zero\n\t" 12755 "movq [$sub + in_bytes(Klass::secondary_super_cache_offset())], $super\t# Hit: update cache\n\t" 12756 "xorq $result, $result\t\t Hit: rdi zero\n\t" 12757 "miss:\t" %} 12758 12759 opcode(0x1); // Force a XOR of RDI 12760 ins_encode(enc_PartialSubtypeCheck()); 12761 ins_pipe(pipe_slow); 12762 %} 12763 12764 instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr, 12765 rsi_RegP sub, rax_RegP super, rcx_RegI rcx, 12766 immP0 zero, 12767 rdi_RegP result) 12768 %{ 12769 match(Set cr (CmpP (PartialSubtypeCheck sub super) zero)); 12770 effect(KILL rcx, KILL result); 12771 12772 ins_cost(1000); 12773 format %{ "movq rdi, [$sub + in_bytes(Klass::secondary_supers_offset())]\n\t" 12774 "movl rcx, [rdi + Array<Klass*>::length_offset_in_bytes()]\t# length to scan\n\t" 12775 "addq rdi, Array<Klass*>::base_offset_in_bytes()\t# Skip to start of data; set NZ in case count is zero\n\t" 12776 "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t" 12777 "jne,s miss\t\t# Missed: flags nz\n\t" 12778 "movq [$sub + in_bytes(Klass::secondary_super_cache_offset())], $super\t# Hit: update cache\n\t" 12779 "miss:\t" %} 12780 12781 opcode(0x0); // No need to XOR RDI 12782 ins_encode(enc_PartialSubtypeCheck()); 12783 ins_pipe(pipe_slow); 12784 %} 12785 12786 // ============================================================================ 12787 // Branch Instructions -- short offset versions 12788 // 12789 // These instructions are used to replace jumps of a long offset (the default 12790 // match) with jumps of a shorter offset. These instructions are all tagged 12791 // with the ins_short_branch attribute, which causes the ADLC to suppress the 12792 // match rules in general matching. Instead, the ADLC generates a conversion 12793 // method in the MachNode which can be used to do in-place replacement of the 12794 // long variant with the shorter variant. The compiler will determine if a 12795 // branch can be taken by the is_short_branch_offset() predicate in the machine 12796 // specific code section of the file. 
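// Concretely (using the sizes declared earlier): the long forms emit jmp rel32
// (5 bytes, size(5)) or jcc rel32 (6 bytes, size(6)), while the short forms
// below use the rel8 encodings (2 bytes, size(2)). A branch is only eligible
// for the short form when its displacement fits in a signed byte, roughly:
//
//   boolean shortOk = (disp >= -128 && disp <= 127);   // illustrative check only
//
// the real decision is made via the offset predicate mentioned above, not by
// this exact expression.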
12797 12798 // Jump Direct - Label defines a relative address from JMP+1 12799 instruct jmpDir_short(label labl) %{ 12800 match(Goto); 12801 effect(USE labl); 12802 12803 ins_cost(300); 12804 format %{ "jmp,s $labl" %} 12805 size(2); 12806 ins_encode %{ 12807 Label* L = $labl$$label; 12808 __ jmpb(*L); 12809 %} 12810 ins_pipe(pipe_jmp); 12811 ins_short_branch(1); 12812 %} 12813 12814 // Jump Direct Conditional - Label defines a relative address from Jcc+1 12815 instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{ 12816 match(If cop cr); 12817 effect(USE labl); 12818 12819 ins_cost(300); 12820 format %{ "j$cop,s $labl" %} 12821 size(2); 12822 ins_encode %{ 12823 Label* L = $labl$$label; 12824 __ jccb((Assembler::Condition)($cop$$cmpcode), *L); 12825 %} 12826 ins_pipe(pipe_jcc); 12827 ins_short_branch(1); 12828 %} 12829 12830 // Jump Direct Conditional - Label defines a relative address from Jcc+1 12831 instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{ 12832 match(CountedLoopEnd cop cr); 12833 effect(USE labl); 12834 12835 ins_cost(300); 12836 format %{ "j$cop,s $labl\t# loop end" %} 12837 size(2); 12838 ins_encode %{ 12839 Label* L = $labl$$label; 12840 __ jccb((Assembler::Condition)($cop$$cmpcode), *L); 12841 %} 12842 ins_pipe(pipe_jcc); 12843 ins_short_branch(1); 12844 %} 12845 12846 // Jump Direct Conditional - Label defines a relative address from Jcc+1 12847 instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{ 12848 match(CountedLoopEnd cop cmp); 12849 effect(USE labl); 12850 12851 ins_cost(300); 12852 format %{ "j$cop,us $labl\t# loop end" %} 12853 size(2); 12854 ins_encode %{ 12855 Label* L = $labl$$label; 12856 __ jccb((Assembler::Condition)($cop$$cmpcode), *L); 12857 %} 12858 ins_pipe(pipe_jcc); 12859 ins_short_branch(1); 12860 %} 12861 12862 instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{ 12863 match(CountedLoopEnd cop cmp); 12864 effect(USE labl); 12865 12866 ins_cost(300); 12867 format %{ "j$cop,us $labl\t# loop end" %} 12868 size(2); 12869 ins_encode %{ 12870 Label* L = $labl$$label; 12871 __ jccb((Assembler::Condition)($cop$$cmpcode), *L); 12872 %} 12873 ins_pipe(pipe_jcc); 12874 ins_short_branch(1); 12875 %} 12876 12877 // Jump Direct Conditional - using unsigned comparison 12878 instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{ 12879 match(If cop cmp); 12880 effect(USE labl); 12881 12882 ins_cost(300); 12883 format %{ "j$cop,us $labl" %} 12884 size(2); 12885 ins_encode %{ 12886 Label* L = $labl$$label; 12887 __ jccb((Assembler::Condition)($cop$$cmpcode), *L); 12888 %} 12889 ins_pipe(pipe_jcc); 12890 ins_short_branch(1); 12891 %} 12892 12893 instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{ 12894 match(If cop cmp); 12895 effect(USE labl); 12896 12897 ins_cost(300); 12898 format %{ "j$cop,us $labl" %} 12899 size(2); 12900 ins_encode %{ 12901 Label* L = $labl$$label; 12902 __ jccb((Assembler::Condition)($cop$$cmpcode), *L); 12903 %} 12904 ins_pipe(pipe_jcc); 12905 ins_short_branch(1); 12906 %} 12907 12908 instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{ 12909 match(If cop cmp); 12910 effect(USE labl); 12911 12912 ins_cost(300); 12913 format %{ $$template 12914 if ($cop$$cmpcode == Assembler::notEqual) { 12915 $$emit$$"jp,u,s $labl\n\t" 12916 $$emit$$"j$cop,u,s $labl" 12917 } else { 12918 $$emit$$"jp,u,s done\n\t" 12919 $$emit$$"j$cop,u,s $labl\n\t" 12920 $$emit$$"done:" 12921 } 12922 %} 12923 size(4); 12924 ins_encode %{ 12925 Label* l = 
$labl$$label; 12926 if ($cop$$cmpcode == Assembler::notEqual) { 12927 __ jccb(Assembler::parity, *l); 12928 __ jccb(Assembler::notEqual, *l); 12929 } else if ($cop$$cmpcode == Assembler::equal) { 12930 Label done; 12931 __ jccb(Assembler::parity, done); 12932 __ jccb(Assembler::equal, *l); 12933 __ bind(done); 12934 } else { 12935 ShouldNotReachHere(); 12936 } 12937 %} 12938 ins_pipe(pipe_jcc); 12939 ins_short_branch(1); 12940 %} 12941 12942 // ============================================================================ 12943 // inlined locking and unlocking 12944 12945 instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rdx_RegI scr, rRegI cx1, rRegI cx2) %{ 12946 predicate(Compile::current()->use_rtm()); 12947 match(Set cr (FastLock object box)); 12948 effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box); 12949 ins_cost(300); 12950 format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %} 12951 ins_encode %{ 12952 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, 12953 $scr$$Register, $cx1$$Register, $cx2$$Register, r15_thread, 12954 _counters, _rtm_counters, _stack_rtm_counters, 12955 ((Method*)(ra_->C->method()->constant_encoding()))->method_data(), 12956 true, ra_->C->profile_rtm()); 12957 %} 12958 ins_pipe(pipe_slow); 12959 %} 12960 12961 instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr, rRegP cx1) %{ 12962 predicate(!Compile::current()->use_rtm()); 12963 match(Set cr (FastLock object box)); 12964 effect(TEMP tmp, TEMP scr, TEMP cx1, USE_KILL box); 12965 ins_cost(300); 12966 format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %} 12967 ins_encode %{ 12968 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, 12969 $scr$$Register, $cx1$$Register, noreg, r15_thread, _counters, NULL, NULL, NULL, false, false); 12970 %} 12971 ins_pipe(pipe_slow); 12972 %} 12973 12974 instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{ 12975 match(Set cr (FastUnlock object box)); 12976 effect(TEMP tmp, USE_KILL box); 12977 ins_cost(300); 12978 format %{ "fastunlock $object,$box\t! kills $box,$tmp" %} 12979 ins_encode %{ 12980 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm()); 12981 %} 12982 ins_pipe(pipe_slow); 12983 %} 12984 12985 12986 // ============================================================================ 12987 // Safepoint Instructions 12988 instruct safePoint_poll_tls(rFlagsReg cr, rRegP poll) 12989 %{ 12990 match(SafePoint poll); 12991 effect(KILL cr, USE poll); 12992 12993 format %{ "testl rax, [$poll]\t" 12994 "# Safepoint: poll for GC" %} 12995 ins_cost(125); 12996 size(4); /* setting an explicit size will cause debug builds to assert if size is incorrect */ 12997 ins_encode %{ 12998 __ relocate(relocInfo::poll_type); 12999 address pre_pc = __ pc(); 13000 __ testl(rax, Address($poll$$Register, 0)); 13001 assert(nativeInstruction_at(pre_pc)->is_safepoint_poll(), "must emit test %%eax [reg]"); 13002 %} 13003 ins_pipe(ialu_reg_mem); 13004 %} 13005 13006 // ============================================================================ 13007 // Procedure Call/Return Instructions 13008 // Call Java Static Instruction 13009 // Note: If this code changes, the corresponding ret_addr_offset() and 13010 // compute_padding() functions will have to be adjusted. 
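// As a rough Java-level illustration of what reaches these call instructs
// (illustrative only -- the final call kind is decided by C2; for instance a
// devirtualized monomorphic call can end up bound like a static call):
//
//   Helper.compute(x);   // invokestatic                      -> CallStaticJava
//   obj.toString();      // invokevirtual, not devirtualized  -> CallDynamicJava
//
// Helper/obj are hypothetical names used only for this sketch.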
13011 instruct CallStaticJavaDirect(method meth) %{ 13012 match(CallStaticJava); 13013 effect(USE meth); 13014 13015 ins_cost(300); 13016 format %{ "call,static " %} 13017 opcode(0xE8); /* E8 cd */ 13018 ins_encode(clear_avx, Java_Static_Call(meth), call_epilog); 13019 ins_pipe(pipe_slow); 13020 ins_alignment(4); 13021 %} 13022 13023 // Call Java Dynamic Instruction 13024 // Note: If this code changes, the corresponding ret_addr_offset() and 13025 // compute_padding() functions will have to be adjusted. 13026 instruct CallDynamicJavaDirect(method meth) 13027 %{ 13028 match(CallDynamicJava); 13029 effect(USE meth); 13030 13031 ins_cost(300); 13032 format %{ "movq rax, #Universe::non_oop_word()\n\t" 13033 "call,dynamic " %} 13034 ins_encode(clear_avx, Java_Dynamic_Call(meth), call_epilog); 13035 ins_pipe(pipe_slow); 13036 ins_alignment(4); 13037 %} 13038 13039 // Call Runtime Instruction 13040 instruct CallRuntimeDirect(method meth) 13041 %{ 13042 match(CallRuntime); 13043 effect(USE meth); 13044 13045 ins_cost(300); 13046 format %{ "call,runtime " %} 13047 ins_encode(clear_avx, Java_To_Runtime(meth)); 13048 ins_pipe(pipe_slow); 13049 %} 13050 13051 // Call runtime without safepoint 13052 instruct CallLeafDirect(method meth) 13053 %{ 13054 match(CallLeaf); 13055 effect(USE meth); 13056 13057 ins_cost(300); 13058 format %{ "call_leaf,runtime " %} 13059 ins_encode(clear_avx, Java_To_Runtime(meth)); 13060 ins_pipe(pipe_slow); 13061 %} 13062 13063 // Call runtime without safepoint and with vector arguments 13064 instruct CallLeafDirectVector(method meth) 13065 %{ 13066 match(CallLeafVector); 13067 effect(USE meth); 13068 13069 ins_cost(300); 13070 format %{ "call_leaf,vector " %} 13071 ins_encode(Java_To_Runtime(meth)); 13072 ins_pipe(pipe_slow); 13073 %} 13074 13075 // 13076 instruct CallNativeDirect(method meth) 13077 %{ 13078 match(CallNative); 13079 effect(USE meth); 13080 13081 ins_cost(300); 13082 format %{ "call_native " %} 13083 ins_encode(clear_avx, Java_To_Runtime(meth)); 13084 ins_pipe(pipe_slow); 13085 %} 13086 13087 // Call runtime without safepoint 13088 instruct CallLeafNoFPDirect(method meth) 13089 %{ 13090 match(CallLeafNoFP); 13091 effect(USE meth); 13092 13093 ins_cost(300); 13094 format %{ "call_leaf_nofp,runtime " %} 13095 ins_encode(clear_avx, Java_To_Runtime(meth)); 13096 ins_pipe(pipe_slow); 13097 %} 13098 13099 // Return Instruction 13100 // Remove the return address & jump to it. 13101 // Notice: We always emit a nop after a ret to make sure there is room 13102 // for safepoint patching 13103 instruct Ret() 13104 %{ 13105 match(Return); 13106 13107 format %{ "ret" %} 13108 ins_encode %{ 13109 __ ret(0); 13110 %} 13111 ins_pipe(pipe_jmp); 13112 %} 13113 13114 // Tail Call; Jump from runtime stub to Java code. 13115 // Also known as an 'interprocedural jump'. 13116 // Target of jump will eventually return to caller. 13117 // TailJump below removes the return address. 13118 instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_ptr) 13119 %{ 13120 match(TailCall jump_target method_ptr); 13121 13122 ins_cost(300); 13123 format %{ "jmp $jump_target\t# rbx holds method" %} 13124 ins_encode %{ 13125 __ jmp($jump_target$$Register); 13126 %} 13127 ins_pipe(pipe_jmp); 13128 %} 13129 13130 // Tail Jump; remove the return address; jump to target. 13131 // TailCall above leaves the return address around. 
13132 instruct tailjmpInd(no_rbp_RegP jump_target, rax_RegP ex_oop) 13133 %{ 13134 match(TailJump jump_target ex_oop); 13135 13136 ins_cost(300); 13137 format %{ "popq rdx\t# pop return address\n\t" 13138 "jmp $jump_target" %} 13139 ins_encode %{ 13140 __ popq(as_Register(RDX_enc)); 13141 __ jmp($jump_target$$Register); 13142 %} 13143 ins_pipe(pipe_jmp); 13144 %} 13145 13146 // Create exception oop: created by stack-crawling runtime code. 13147 // Created exception is now available to this handler, and is setup 13148 // just prior to jumping to this handler. No code emitted. 13149 instruct CreateException(rax_RegP ex_oop) 13150 %{ 13151 match(Set ex_oop (CreateEx)); 13152 13153 size(0); 13154 // use the following format syntax 13155 format %{ "# exception oop is in rax; no code emitted" %} 13156 ins_encode(); 13157 ins_pipe(empty); 13158 %} 13159 13160 // Rethrow exception: 13161 // The exception oop will come in the first argument position. 13162 // Then JUMP (not call) to the rethrow stub code. 13163 instruct RethrowException() 13164 %{ 13165 match(Rethrow); 13166 13167 // use the following format syntax 13168 format %{ "jmp rethrow_stub" %} 13169 ins_encode(enc_rethrow); 13170 ins_pipe(pipe_jmp); 13171 %} 13172 13173 // ============================================================================ 13174 // This name is KNOWN by the ADLC and cannot be changed. 13175 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type 13176 // for this guy. 13177 instruct tlsLoadP(r15_RegP dst) %{ 13178 match(Set dst (ThreadLocal)); 13179 effect(DEF dst); 13180 13181 size(0); 13182 format %{ "# TLS is in R15" %} 13183 ins_encode( /*empty encoding*/ ); 13184 ins_pipe(ialu_reg_reg); 13185 %} 13186 13187 13188 //----------PEEPHOLE RULES----------------------------------------------------- 13189 // These must follow all instruction definitions as they use the names 13190 // defined in the instructions definitions. 13191 // 13192 // peepmatch ( root_instr_name [preceding_instruction]* ); 13193 // 13194 // peepconstraint %{ 13195 // (instruction_number.operand_name relational_op instruction_number.operand_name 13196 // [, ...] ); 13197 // // instruction numbers are zero-based using left to right order in peepmatch 13198 // 13199 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); 13200 // // provide an instruction_number.operand_name for each operand that appears 13201 // // in the replacement instruction's match rule 13202 // 13203 // ---------VM FLAGS--------------------------------------------------------- 13204 // 13205 // All peephole optimizations can be turned off using -XX:-OptoPeephole 13206 // 13207 // Each peephole rule is given an identifying number starting with zero and 13208 // increasing by one in the order seen by the parser. An individual peephole 13209 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# 13210 // on the command-line. 
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == RAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(rRegI dst, rRegI src)
// %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_rReg(rRegI dst, immI_1 src, rFlagsReg cr)
// %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_rReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
// %}
//

// Implementation no longer uses movX instructions since
// machine-independent system no longer uses CopyX nodes.
//
// peephole
// %{
//   peepmatch (incI_rReg movI);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (decI_rReg movI);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (addI_rReg_imm movI);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (incL_rReg movL);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (decL_rReg movL);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (addL_rReg_imm movL);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (addP_rReg_imm movP);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
// %}

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, rRegI src)
// %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(rRegI dst, memory mem)
// %{
//   match(Set dst (LoadI mem));
// %}
//

peephole
%{
  peepmatch (loadI storeI);
  peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
  peepreplace (storeI(1.mem 1.mem 1.src));
%}

peephole
%{
  peepmatch (loadL storeL);
  peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
  peepreplace (storeL(1.mem 1.mem 1.src));
%}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
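// For reference, the intended effect of the (now historical) inc/mov peephole
// documented in the example above, in rough terms (illustrative only):
//
//   int y = x + 1;                    // Java source
//
//   // without the peephole:   movl Rdst, Rsrc
//   //                         incl Rdst
//   // with the peephole:      leal Rdst, [Rsrc + 1]
//
// i.e. peepreplace rewrites the matched (inc, mov) pair into the single lea
// form named in its replacement rule.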