/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "ci/ciInlineKlass.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below      = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal      = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual   = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};
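
// Illustrative usage note (not part of the original table): indexing with a
// condition code yields its negation, which jump_cc() below relies on when it
// must reverse a branch around an indirect jump. For example,
// reverse[Assembler::equal] is Assembler::notZero (equal and zero share
// encoding 0x4).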


// Implementation of MacroAssembler

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}
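
// Illustrative trace of the alignment dance above (not emitted code): if, for
// example, rsp % 16 == 8 at the test, the testl finds a nonzero low nibble, so
// the out-of-line path pushes 8 filler bytes, makes the call with rsp 16-byte
// aligned as the ABI requires, then pops the filler; if rsp is already
// aligned, the call is made directly.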

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor (may not be rax/rdx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
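
// Illustrative note (not emitted code): the special case above guards the one
// input pair that traps in hardware. Java defines
//   min_long / -1 == min_long  (the quotient wraps)  and  min_long % -1 == 0,
// but idivq raises #DE on that input because the true quotient 2^63 is not
// representable, so the code returns (rax = min_long, rdx = 0) without dividing.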

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32-bit code could do a case-table jump in one instruction, but we no longer
// allow the base to be installed in the Address class.
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  // Skip AOT caching C strings in scratch buffer.
  const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
  lea(c_rarg0, ExternalAddress((address) str));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16); // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

#ifdef _WIN64
  // Windows always allocates space for its register args
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif
  const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
  lea(c_rarg0, ExternalAddress((address) str));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha(); // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16); // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
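
// Worked example (illustrative, assuming VMRegImpl::stack_slot_size == 4):
// the first incoming stack slot (reg2stack() == 0) maps to rbp + 16 via
// reg2offset_in(), i.e. just past the saved rbp and the return address
// (two 8-byte words, two slots each).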

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to move between a float register and an integer register
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (x86_32/64 ABI), even though Java would only store 32 bits for a parameter.
// On 32-bit it would simply be 32 bits, so this routine does 32->32 on 32-bit
// and 32->64 on 64-bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if the oop is null; if it is, we need no handle.

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles, and pass a handle if the oop is non-null.

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If the arg is on the stack, place rHandle there; otherwise it is already in the correct register.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
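
// Net effect (illustrative summary, not emitted code): after object_move,
// rHandle is null when the incoming oop was null (the cmovptr loads the zero
// that was just stored), and otherwise points at the stack slot holding the
// oop -- exactly the jobject handle shape the native callee expects.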

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  addq(dst, imm32);
}

void MacroAssembler::addptr(Register dst, Register src) {
  addq(dst, src);
}

void MacroAssembler::addptr(Address dst, Register src) {
  addq(dst, src);
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}
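
// Worked example (illustrative): with modulus == 64 and target % 64 == 24,
// align() emits 64 - 24 = 40 bytes of nops so the next instruction starts on
// a 64-byte boundary; when target is already a multiple of 64, nothing is
// emitted.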

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::push_ppx(Register src) {
  if (VM_Version::supports_apx_f()) {
    pushp(src);
  } else {
    Assembler::push(src);
  }
}

void MacroAssembler::pop_ppx(Register dst) {
  if (VM_Version::supports_apx_f()) {
    popp(dst);
  } else {
    Assembler::pop(dst);
  }
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 2 &&
      (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
      (dst->encoding() >= 16)) {
    vpand(dst, dst, src, AVX_512bit, rscratch);
  } else if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  andq(dst, imm32);
}

void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}

// Writes to successive pages of the stack, one page at a time, to check for
// stack overflow plus the shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move, but since it can serve as a debugging
    // crumb, the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  }
}
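
// Illustrative trace (assuming 4K pages and size == 12K on entry): the loop
// writes at rsp-4K, rsp-8K and rsp-12K, then the unrolled tail above continues
// at rsp-16K, rsp-20K, ... down through the remainder of the shadow zone.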

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
  jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // Needs full 64-bit immediate for later patching.
  Assembler::mov64(rax, (int64_t)Universe::non_oop_word());
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return UseCompactObjectHeaders ? 17 : 14;
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rax;
  Register temp = rscratch1;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(temp, receiver);
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);

  return uep_offset;
}
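
// Sketch of the emitted sequence (illustrative, non-compact-headers case;
// register names follow the fixed choices above):
//   movl  rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]       ; receiver klass
//   cmpl  rscratch1, [rax + CompiledICData::speculated_klass_offset()]  ; expected klass
//   jne   SharedRuntime::get_ic_miss_stub()                             ; 14 bytes total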

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}
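
// Shape of the stub (illustrative): a movabs of rbx that will later be patched
// with the resolved Method*, followed by a jump whose target initially points
// at the jump instruction itself -- that self-referential target is what the
// relocation/IC machinery recognizes as "unresolved" until fixup patches in
// the real entry.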

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  assert_different_registers(arg_1, c_rarg2);

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  Register java_thread = r15_thread;

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // pass java thread (becomes the first argument of the C function)

  mov(c_rarg0, r15_thread);

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

#ifdef ASSERT
  // Check that thread register is not clobbered.
  guarantee(java_thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread_slow(rax);
    cmpptr(java_thread, rax);
    jcc(Assembler::equal, L);
    STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
    bind(L);
  }
  pop(rax);
#endif

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe();
  check_and_handle_earlyret();

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
    // This used to conditionally jump to forward_exception, but it is
    // possible that, after relocation, the branch would not reach. So we
    // must jump around it so the target is always reachable.

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculating the value for last_Java_sp is somewhat subtle.
  // call_VM does an intermediate call which places a return address on
  // the stack just under the stack pointer, as if the caller had finished
  // with it. This allows us to retrieve last_Java_pc from last_Java_sp[-1].

  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));

  call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when the MacroAssembler version of call_VM_leaf_base() should be called from the Interpreter.
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 4);
}

void MacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result_oop(Register oop_result) {
  movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
  movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
  movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
  movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
}

void MacroAssembler::check_and_handle_earlyret() {
}

void MacroAssembler::check_and_handle_popframe() {
}

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpl(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    cmpl(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}

void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
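
// Result mapping for the two routines above (illustrative):
//   opr1 <  opr2 -> -1      opr1 == opr2 -> 0      opr1 > opr2 -> +1
//   unordered (NaN operand) -> -1 when unordered_is_less, else +1
// This matches the semantics of the Java fcmpl/fcmpg and dcmpl/dcmpg bytecodes.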

void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpb(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (src2.is_lval()) {
    movptr(rscratch, src2);
    Assembler::cmpq(src1, rscratch);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
  assert(src2.is_lval(), "not a mem-mem compare");
  // moves src2's literal address
  movptr(rscratch, src2);
  Assembler::cmpq(src1, rscratch);
}

void MacroAssembler::cmpoop(Register src1, Register src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, Address src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
  movoop(rscratch, src2);
  cmpptr(src1, rscratch);
}

void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(adr), "missing");

  if (reachable(adr)) {
    lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch, adr);
    lock();
    cmpxchgptr(reg, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  cmpxchgq(reg, adr);
}

void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comisd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comisd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comiss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comiss(dst, Address(rscratch, 0));
  }
}


void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  pushf(); // Preserve flags
  atomic_incl(counter_addr, rscratch);
  popf();
  bind(L);
}

int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_int
  //         reg: divisor (may not be rax/rdx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_int
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
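
// Same illustrative caveat as corrected_idivq() above: Java defines
// min_int / -1 == min_int and min_int % -1 == 0, while idivl would raise #DE
// on that input, so the special case bypasses the divide entirely.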

void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) { subl(reg, value); return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) { subl(dst, value); return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}

void MacroAssembler::division_with_shift(Register reg, int shift_value) {
  assert(shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl(reg, reg);
  jcc(Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind(_is_positive);
  sarl(reg, shift_value);
}
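
// Worked example (illustrative, shift_value == 2, i.e. divide by 4):
//   reg == -7: bias = (1 << 2) - 1 = 3, and (-7 + 3) >> 2 == -1  (Java: -7/4 == -1)
//   reg == +7: no bias is applied, and         7 >> 2 ==  1      (Java:  7/4 ==  1)
// The bias makes the arithmetic shift round toward zero, matching idiv.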

void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  emit_int8((uint8_t)0x0f);
  emit_int8((uint8_t)0x1f);
  emit_int8((uint8_t)0x84);
  emit_int8((uint8_t)0x00);
  emit_int32(0x00);
}
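
// The eight bytes emitted above decode as one long nop:
//   0F 1F 84 00 00 00 00 00    nopl 0x0(%rax,%rax,1)
// Using a single instruction with a 32-bit displacement leaves room for the
// runtime to patch data into the displacement later (see NativePostCallNop)
// while the instruction remains a no-op for the CPU.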

void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::mulpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulpd(dst, Address(rscratch, 0));
  }
}

// dst = c = a * b + c
void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
  Assembler::vfmadd231sd(c, a, b);
  if (dst != c) {
    movdbl(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
  Assembler::vfmadd231ss(c, a, b);
  if (dst != c) {
    movflt(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231pd(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231ps(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231pd(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231ps(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}
1708
1709 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
1710 assert(rscratch != noreg || always_reachable(dst), "missing");
1711
1712 if (reachable(dst)) {
1713 incrementl(as_Address(dst));
1714 } else {
1715 lea(rscratch, dst);
1716 incrementl(Address(rscratch, 0));
1717 }
1718 }
1719
1720 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
1721 incrementl(as_Address(dst, rscratch));
1722 }
1723
1724 void MacroAssembler::incrementl(Register reg, int value) {
1725 if (value == min_jint) {addl(reg, value) ; return; }
1726 if (value < 0) { decrementl(reg, -value); return; }
1727 if (value == 0) { ; return; }
1728 if (value == 1 && UseIncDec) { incl(reg) ; return; }
1729 /* else */ { addl(reg, value) ; return; }
1730 }
1731
1732 void MacroAssembler::incrementl(Address dst, int value) {
1733 if (value == min_jint) {addl(dst, value) ; return; }
1734 if (value < 0) { decrementl(dst, -value); return; }
1735 if (value == 0) { ; return; }
1736 if (value == 1 && UseIncDec) { incl(dst) ; return; }
1737 /* else */ { addl(dst, value) ; return; }
1738 }
1739
1740 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
1741 assert(rscratch != noreg || always_reachable(dst), "missing");
1742 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
1743 if (reachable(dst)) {
1744 jmp_literal(dst.target(), dst.rspec());
1745 } else {
1746 lea(rscratch, dst);
1747 jmp(rscratch);
1748 }
1749 }
1750
1751 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
1752 assert(rscratch != noreg || always_reachable(dst), "missing");
1753 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
1754 if (reachable(dst)) {
1755 InstructionMark im(this);
1756 relocate(dst.reloc());
1757 const int short_size = 2;
1758 const int long_size = 6;
1759 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
1760 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
1761 // 0111 tttn #8-bit disp
1762 emit_int8(0x70 | cc);
1763 emit_int8((offs - short_size) & 0xFF);
1764 } else {
1765 // 0000 1111 1000 tttn #32-bit disp
1766 emit_int8(0x0F);
1767 emit_int8((unsigned char)(0x80 | cc));
1768 emit_int32(offs - long_size);
1769 }
1770 } else {
1771 #ifdef ASSERT
1772 warning("reversing conditional branch");
1773 #endif /* ASSERT */
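    // Out of reach: reverse the condition and branch around an absolute
    // indirect jump:  jcc(cc, far)  ==>  jcc(!cc, skip); jmp far; skip: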
1774 Label skip;
1775 jccb(reverse[cc], skip);
1776 lea(rscratch, dst);
1777 Assembler::jmp(rscratch);
1778 bind(skip);
1779 }
1780 }
1781
1782 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
1783 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
1784 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
1785
1786 stmxcsr(mxcsr_save);
1787 movl(tmp, mxcsr_save);
1788 if (EnableX86ECoreOpts) {
    // The reference mxcsr_std value keeps the MXCSR status bits set, which
    // performs better on E-cores, so set them in tmp as well before comparing
1790 orl(tmp, 0x003f);
1791 } else {
1792 // Mask out status bits (only check control and mask bits)
1793 andl(tmp, 0xFFC0);
1794 }
1795 cmp32(tmp, mxcsr_std, rscratch);
1796 }
1797
1798 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
1799 assert(rscratch != noreg || always_reachable(src), "missing");
1800
1801 if (reachable(src)) {
1802 Assembler::ldmxcsr(as_Address(src));
1803 } else {
1804 lea(rscratch, src);
1805 Assembler::ldmxcsr(Address(rscratch, 0));
1806 }
1807 }
1808
1809 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1810 int off = offset();
1811 movsbl(dst, src); // movsxb
1812 return off;
1813 }
1814
1815 // Note: load_signed_short used to be called load_signed_word.
1816 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
1817 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
1818 // The term "word" in HotSpot means a 32- or 64-bit machine word.
1819 int MacroAssembler::load_signed_short(Register dst, Address src) {
  // A signed 16 => 64 bit extension would also appear safe, but the 64-bit
  // code has always extended only to 32 bits, which implies that callers
  // rely on at most 32 bits of the result.
1823 int off = offset();
1824 movswl(dst, src); // movsxw
1825 return off;
1826 }
1827
1828 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
1831 int off = offset();
1832 movzbl(dst, src); // movzxb
1833 return off;
1834 }
1835
1836 // Note: load_unsigned_short used to be called load_unsigned_word.
1837 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
1840 int off = offset();
1841 movzwl(dst, src); // movzxw
1842 return off;
1843 }
1844
1845 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1846 switch (size_in_bytes) {
1847 case 8: movq(dst, src); break;
1848 case 4: movl(dst, src); break;
1849 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1850 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1851 default: ShouldNotReachHere();
1852 }
1853 }
1854
1855 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1856 switch (size_in_bytes) {
1857 case 8: movq(dst, src); break;
1858 case 4: movl(dst, src); break;
1859 case 2: movw(dst, src); break;
1860 case 1: movb(dst, src); break;
1861 default: ShouldNotReachHere();
1862 }
1863 }
1864
1865 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
1866 assert(rscratch != noreg || always_reachable(dst), "missing");
1867
1868 if (reachable(dst)) {
1869 movl(as_Address(dst), src);
1870 } else {
1871 lea(rscratch, dst);
1872 movl(Address(rscratch, 0), src);
1873 }
1874 }
1875
1876 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
1877 if (reachable(src)) {
1878 movl(dst, as_Address(src));
1879 } else {
1880 lea(dst, src);
1881 movl(dst, Address(dst, 0));
1882 }
1883 }
1884
1885 // C++ bool manipulation
1886
void MacroAssembler::movbool(Register dst, Address src) {
  if (sizeof(bool) == 1) {
    movb(dst, src);
  } else if (sizeof(bool) == 2) {
    movw(dst, src);
  } else if (sizeof(bool) == 4) {
    movl(dst, src);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}
1898
void MacroAssembler::movbool(Address dst, bool boolconst) {
  if (sizeof(bool) == 1) {
    movb(dst, (int) boolconst);
  } else if (sizeof(bool) == 2) {
    movw(dst, (int) boolconst);
  } else if (sizeof(bool) == 4) {
    movl(dst, (int) boolconst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}
1910
void MacroAssembler::movbool(Address dst, Register src) {
  if (sizeof(bool) == 1) {
    movb(dst, src);
  } else if (sizeof(bool) == 2) {
    movw(dst, src);
  } else if (sizeof(bool) == 4) {
    movl(dst, src);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}
1922
1923 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1924 assert(rscratch != noreg || always_reachable(src), "missing");
1925
1926 if (reachable(src)) {
1927 movdl(dst, as_Address(src));
1928 } else {
1929 lea(rscratch, src);
1930 movdl(dst, Address(rscratch, 0));
1931 }
1932 }
1933
1934 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
1935 assert(rscratch != noreg || always_reachable(src), "missing");
1936
1937 if (reachable(src)) {
1938 movq(dst, as_Address(src));
1939 } else {
1940 lea(rscratch, src);
1941 movq(dst, Address(rscratch, 0));
1942 }
1943 }
1944
1945 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1946 assert(rscratch != noreg || always_reachable(src), "missing");
1947
1948 if (reachable(src)) {
1949 if (UseXmmLoadAndClearUpper) {
1950 movsd (dst, as_Address(src));
1951 } else {
1952 movlpd(dst, as_Address(src));
1953 }
1954 } else {
1955 lea(rscratch, src);
1956 if (UseXmmLoadAndClearUpper) {
1957 movsd (dst, Address(rscratch, 0));
1958 } else {
1959 movlpd(dst, Address(rscratch, 0));
1960 }
1961 }
1962 }
1963
1964 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
1965 assert(rscratch != noreg || always_reachable(src), "missing");
1966
1967 if (reachable(src)) {
1968 movss(dst, as_Address(src));
1969 } else {
1970 lea(rscratch, src);
1971 movss(dst, Address(rscratch, 0));
1972 }
1973 }
1974
1975 void MacroAssembler::movhlf(XMMRegister dst, XMMRegister src, Register rscratch) {
1976 if (VM_Version::supports_avx10_2()) {
1977 evmovw(dst, src);
1978 } else {
1979 assert(rscratch != noreg, "missing");
1980 evmovw(rscratch, src);
1981 evmovw(dst, rscratch);
1982 }
1983 }
1984
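// Load a 64-bit immediate with the shortest encoding:
//  - fits in 32 unsigned bits: movl, which zero-extends into the full register
//  - fits in 32 signed bits:   movq with a sign-extended imm32
//  - otherwise:                the full 64-bit immediate form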
1985 void MacroAssembler::mov64(Register dst, int64_t imm64) {
1986 if (is_uimm32(imm64)) {
1987 movl(dst, checked_cast<uint32_t>(imm64));
1988 } else if (is_simm32(imm64)) {
1989 movq(dst, checked_cast<int32_t>(imm64));
1990 } else {
1991 Assembler::mov64(dst, imm64);
1992 }
1993 }
1994
1995 void MacroAssembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) {
1996 Assembler::mov64(dst, imm64, rtype, format);
1997 }
1998
1999 void MacroAssembler::movptr(Register dst, Register src) {
2000 movq(dst, src);
2001 }
2002
2003 void MacroAssembler::movptr(Register dst, Address src) {
2004 movq(dst, src);
2005 }
2006
2007 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
2008 void MacroAssembler::movptr(Register dst, intptr_t src) {
2009 mov64(dst, src);
2010 }
2011
2012 void MacroAssembler::movptr(Address dst, Register src) {
2013 movq(dst, src);
2014 }
2015
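// Stores a sign-extended 32-bit immediate; x86-64 has no store with a 64-bit
// immediate, so this is the widest direct form.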
2016 void MacroAssembler::movptr(Address dst, int32_t src) {
2017 movslq(dst, src);
2018 }
2019
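// Legacy SSE encodings cannot name XMM16-31; those registers require EVEX
// (AVX512VL at this 128-bit width), hence the encoding asserts below.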
2020 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
2021 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2022 Assembler::movdqu(dst, src);
2023 }
2024
2025 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
2026 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2027 Assembler::movdqu(dst, src);
2028 }
2029
2030 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
2031 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2032 Assembler::movdqu(dst, src);
2033 }
2034
2035 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2036 assert(rscratch != noreg || always_reachable(src), "missing");
2037
2038 if (reachable(src)) {
2039 movdqu(dst, as_Address(src));
2040 } else {
2041 lea(rscratch, src);
2042 movdqu(dst, Address(rscratch, 0));
2043 }
2044 }
2045
2046 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
2047 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2048 Assembler::vmovdqu(dst, src);
2049 }
2050
2051 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
2052 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2053 Assembler::vmovdqu(dst, src);
2054 }
2055
2056 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
2057 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2058 Assembler::vmovdqu(dst, src);
2059 }
2060
2061 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2062 assert(rscratch != noreg || always_reachable(src), "missing");
2063
2064 if (reachable(src)) {
2065 vmovdqu(dst, as_Address(src));
  } else {
2068 lea(rscratch, src);
2069 vmovdqu(dst, Address(rscratch, 0));
2070 }
2071 }
2072
2073 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2074 assert(rscratch != noreg || always_reachable(src), "missing");
2075
2076 if (vector_len == AVX_512bit) {
2077 evmovdquq(dst, src, AVX_512bit, rscratch);
2078 } else if (vector_len == AVX_256bit) {
2079 vmovdqu(dst, src, rscratch);
2080 } else {
2081 movdqu(dst, src, rscratch);
2082 }
2083 }
2084
2085 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
2086 if (vector_len == AVX_512bit) {
2087 evmovdquq(dst, src, AVX_512bit);
2088 } else if (vector_len == AVX_256bit) {
2089 vmovdqu(dst, src);
2090 } else {
2091 movdqu(dst, src);
2092 }
2093 }
2094
2095 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
2096 if (vector_len == AVX_512bit) {
2097 evmovdquq(dst, src, AVX_512bit);
2098 } else if (vector_len == AVX_256bit) {
2099 vmovdqu(dst, src);
2100 } else {
2101 movdqu(dst, src);
2102 }
2103 }
2104
2105 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
2106 if (vector_len == AVX_512bit) {
2107 evmovdquq(dst, src, AVX_512bit);
2108 } else if (vector_len == AVX_256bit) {
2109 vmovdqu(dst, src);
2110 } else {
2111 movdqu(dst, src);
2112 }
2113 }
2114
2115 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2116 assert(rscratch != noreg || always_reachable(src), "missing");
2117
2118 if (reachable(src)) {
2119 vmovdqa(dst, as_Address(src));
  } else {
2122 lea(rscratch, src);
2123 vmovdqa(dst, Address(rscratch, 0));
2124 }
2125 }
2126
2127 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2128 assert(rscratch != noreg || always_reachable(src), "missing");
2129
2130 if (vector_len == AVX_512bit) {
2131 evmovdqaq(dst, src, AVX_512bit, rscratch);
2132 } else if (vector_len == AVX_256bit) {
2133 vmovdqa(dst, src, rscratch);
2134 } else {
2135 movdqa(dst, src, rscratch);
2136 }
2137 }
2138
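// kmov: copy a whole opmask register. With AVX512BW masks are 64 bits wide
// (kmovq); on EVEX-only hardware they are at most 16 bits (kmovw).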
2139 void MacroAssembler::kmov(KRegister dst, Address src) {
2140 if (VM_Version::supports_avx512bw()) {
2141 kmovql(dst, src);
2142 } else {
2143 assert(VM_Version::supports_evex(), "");
2144 kmovwl(dst, src);
2145 }
2146 }
2147
2148 void MacroAssembler::kmov(Address dst, KRegister src) {
2149 if (VM_Version::supports_avx512bw()) {
2150 kmovql(dst, src);
2151 } else {
2152 assert(VM_Version::supports_evex(), "");
2153 kmovwl(dst, src);
2154 }
2155 }
2156
2157 void MacroAssembler::kmov(KRegister dst, KRegister src) {
2158 if (VM_Version::supports_avx512bw()) {
2159 kmovql(dst, src);
2160 } else {
2161 assert(VM_Version::supports_evex(), "");
2162 kmovwl(dst, src);
2163 }
2164 }
2165
2166 void MacroAssembler::kmov(Register dst, KRegister src) {
2167 if (VM_Version::supports_avx512bw()) {
2168 kmovql(dst, src);
2169 } else {
2170 assert(VM_Version::supports_evex(), "");
2171 kmovwl(dst, src);
2172 }
2173 }
2174
2175 void MacroAssembler::kmov(KRegister dst, Register src) {
2176 if (VM_Version::supports_avx512bw()) {
2177 kmovql(dst, src);
2178 } else {
2179 assert(VM_Version::supports_evex(), "");
2180 kmovwl(dst, src);
2181 }
2182 }
2183
2184 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
2185 assert(rscratch != noreg || always_reachable(src), "missing");
2186
2187 if (reachable(src)) {
2188 kmovql(dst, as_Address(src));
2189 } else {
2190 lea(rscratch, src);
2191 kmovql(dst, Address(rscratch, 0));
2192 }
2193 }
2194
2195 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
2196 assert(rscratch != noreg || always_reachable(src), "missing");
2197
2198 if (reachable(src)) {
2199 kmovwl(dst, as_Address(src));
2200 } else {
2201 lea(rscratch, src);
2202 kmovwl(dst, Address(rscratch, 0));
2203 }
2204 }
2205
2206 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2207 int vector_len, Register rscratch) {
2208 assert(rscratch != noreg || always_reachable(src), "missing");
2209
2210 if (reachable(src)) {
2211 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
2212 } else {
2213 lea(rscratch, src);
2214 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
2215 }
2216 }
2217
2218 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2219 int vector_len, Register rscratch) {
2220 assert(rscratch != noreg || always_reachable(src), "missing");
2221
2222 if (reachable(src)) {
2223 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
2224 } else {
2225 lea(rscratch, src);
2226 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
2227 }
2228 }
2229
2230 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2231 assert(rscratch != noreg || always_reachable(src), "missing");
2232
2233 if (reachable(src)) {
2234 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
2235 } else {
2236 lea(rscratch, src);
2237 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
2238 }
2239 }
2240
2241 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2242 assert(rscratch != noreg || always_reachable(src), "missing");
2243
2244 if (reachable(src)) {
2245 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
2246 } else {
2247 lea(rscratch, src);
2248 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
2249 }
2250 }
2251
2252 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2253 assert(rscratch != noreg || always_reachable(src), "missing");
2254
2255 if (reachable(src)) {
2256 Assembler::evmovdquq(dst, as_Address(src), vector_len);
2257 } else {
2258 lea(rscratch, src);
2259 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
2260 }
2261 }
2262
2263 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2264 assert(rscratch != noreg || always_reachable(src), "missing");
2265
2266 if (reachable(src)) {
2267 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
2268 } else {
2269 lea(rscratch, src);
2270 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
2271 }
2272 }
2273
2274 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2275 assert(rscratch != noreg || always_reachable(src), "missing");
2276
2277 if (reachable(src)) {
2278 Assembler::evmovdqaq(dst, as_Address(src), vector_len);
2279 } else {
2280 lea(rscratch, src);
2281 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
2282 }
2283 }
2284
2285 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2286 assert(rscratch != noreg || always_reachable(src), "missing");
2287
2288 if (reachable(src)) {
2289 Assembler::movapd(dst, as_Address(src));
2290 } else {
2291 lea(rscratch, src);
2292 Assembler::movapd(dst, Address(rscratch, 0));
2293 }
2294 }
2295
2296 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2297 assert(rscratch != noreg || always_reachable(src), "missing");
2298
2299 if (reachable(src)) {
2300 Assembler::movdqa(dst, as_Address(src));
2301 } else {
2302 lea(rscratch, src);
2303 Assembler::movdqa(dst, Address(rscratch, 0));
2304 }
2305 }
2306
2307 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2308 assert(rscratch != noreg || always_reachable(src), "missing");
2309
2310 if (reachable(src)) {
2311 Assembler::movsd(dst, as_Address(src));
2312 } else {
2313 lea(rscratch, src);
2314 Assembler::movsd(dst, Address(rscratch, 0));
2315 }
2316 }
2317
2318 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2319 assert(rscratch != noreg || always_reachable(src), "missing");
2320
2321 if (reachable(src)) {
2322 Assembler::movss(dst, as_Address(src));
2323 } else {
2324 lea(rscratch, src);
2325 Assembler::movss(dst, Address(rscratch, 0));
2326 }
2327 }
2328
2329 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
2330 assert(rscratch != noreg || always_reachable(src), "missing");
2331
2332 if (reachable(src)) {
2333 Assembler::movddup(dst, as_Address(src));
2334 } else {
2335 lea(rscratch, src);
2336 Assembler::movddup(dst, Address(rscratch, 0));
2337 }
2338 }
2339
2340 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2341 assert(rscratch != noreg || always_reachable(src), "missing");
2342
2343 if (reachable(src)) {
2344 Assembler::vmovddup(dst, as_Address(src), vector_len);
2345 } else {
2346 lea(rscratch, src);
2347 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
2348 }
2349 }
2350
2351 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2352 assert(rscratch != noreg || always_reachable(src), "missing");
2353
2354 if (reachable(src)) {
2355 Assembler::mulsd(dst, as_Address(src));
2356 } else {
2357 lea(rscratch, src);
2358 Assembler::mulsd(dst, Address(rscratch, 0));
2359 }
2360 }
2361
2362 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2363 assert(rscratch != noreg || always_reachable(src), "missing");
2364
2365 if (reachable(src)) {
2366 Assembler::mulss(dst, as_Address(src));
2367 } else {
2368 lea(rscratch, src);
2369 Assembler::mulss(dst, Address(rscratch, 0));
2370 }
2371 }
2372
2373 void MacroAssembler::null_check(Register reg, int offset) {
2374 if (needs_explicit_null_check(offset)) {
2375 // provoke OS null exception if reg is null by
2376 // accessing M[reg] w/o changing any (non-CC) registers
2377 // NOTE: cmpl is plenty here to provoke a segv
2378 cmpptr(rax, Address(reg, 0));
2379 // Note: should probably use testl(rax, Address(reg, 0));
2380 // may be shorter code (however, this version of
2381 // testl needs to be implemented first)
2382 } else {
2383 // nothing to do, (later) access of M[reg + offset]
2384 // will provoke OS null exception if reg is null
2385 }
2386 }
2387
2388 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2389 andptr(markword, markWord::inline_type_pattern_mask);
2390 cmpptr(markword, markWord::inline_type_pattern);
2391 jcc(Assembler::equal, is_inline_type);
2392 }
2393
2394 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
2395 if (can_be_null) {
2396 testptr(object, object);
2397 jcc(Assembler::zero, not_inline_type);
2398 }
2399 const int is_inline_type_mask = markWord::inline_type_pattern;
2400 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2401 andptr(tmp, is_inline_type_mask);
2402 cmpptr(tmp, is_inline_type_mask);
2403 jcc(Assembler::notEqual, not_inline_type);
2404 }
2405
2406 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2407 movl(temp_reg, flags);
2408 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2409 jcc(Assembler::notEqual, is_null_free_inline_type);
2410 }
2411
2412 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2413 movl(temp_reg, flags);
2414 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2415 jcc(Assembler::equal, not_null_free_inline_type);
2416 }
2417
2418 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2419 movl(temp_reg, flags);
2420 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
2421 jcc(Assembler::notEqual, is_flat);
2422 }
2423
2424 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2425 movl(temp_reg, flags);
2426 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
2427 jcc(Assembler::notEqual, has_null_marker);
2428 }
2429
2430 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2431 Label test_mark_word;
2432 // load mark word
2433 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
2434 // check displaced
2435 testl(temp_reg, markWord::unlocked_value);
2436 jccb(Assembler::notZero, test_mark_word);
2437 // slow path use klass prototype
2438 push(rscratch1);
2439 load_prototype_header(temp_reg, oop, rscratch1);
2440 pop(rscratch1);
2441
2442 bind(test_mark_word);
2443 testl(temp_reg, test_bit);
2444 jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
2445 }
2446
2447 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
2448 Label& is_flat_array) {
2449 #ifdef _LP64
2450 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2451 #else
2452 load_klass(temp_reg, oop, noreg);
2453 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2454 test_flat_array_layout(temp_reg, is_flat_array);
2455 #endif
2456 }
2457
2458 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
2459 Label& is_non_flat_array) {
2460 #ifdef _LP64
2461 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2462 #else
2463 load_klass(temp_reg, oop, noreg);
2464 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2465 test_non_flat_array_layout(temp_reg, is_non_flat_array);
2466 #endif
2467 }
2468
2469 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
2470 #ifdef _LP64
2471 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2472 #else
2473 Unimplemented();
2474 #endif
2475 }
2476
2477 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
2478 #ifdef _LP64
2479 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2480 #else
2481 Unimplemented();
2482 #endif
2483 }
2484
2485 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2486 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2487 jcc(Assembler::notZero, is_flat_array);
2488 }
2489
2490 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2491 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2492 jcc(Assembler::zero, is_non_flat_array);
2493 }
2494
2495 void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
2497 // (e.g., MSVC can't call ps() otherwise)
2498 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
2499 }
2500
2501 void MacroAssembler::unimplemented(const char* what) {
2502 const char* buf = nullptr;
2503 {
2504 ResourceMark rm;
2505 stringStream ss;
2506 ss.print("unimplemented: %s", what);
2507 buf = code_string(ss.as_string());
2508 }
2509 stop(buf);
2510 }
2511
2512 #define XSTATE_BV 0x200
2513
2514 void MacroAssembler::pop_CPU_state() {
2515 pop_FPU_state();
2516 pop_IU_state();
2517 }
2518
2519 void MacroAssembler::pop_FPU_state() {
2520 fxrstor(Address(rsp, 0));
2521 addptr(rsp, FPUStateSizeInWords * wordSize);
2522 }
2523
2524 void MacroAssembler::pop_IU_state() {
2525 popa();
2526 addq(rsp, 8);
2527 popf();
2528 }
2529
2530 // Save Integer and Float state
2531 // Warning: Stack must be 16 byte aligned (64bit)
2532 void MacroAssembler::push_CPU_state() {
2533 push_IU_state();
2534 push_FPU_state();
2535 }
2536
2537 void MacroAssembler::push_FPU_state() {
2538 subptr(rsp, FPUStateSizeInWords * wordSize);
2539 fxsave(Address(rsp, 0));
2540 }
2541
2542 void MacroAssembler::push_IU_state() {
2543 // Push flags first because pusha kills them
2544 pushf();
2545 // Make sure rsp stays 16-byte aligned
2546 subq(rsp, 8);
2547 pusha();
2548 }
2549
2550 void MacroAssembler::push_cont_fastpath() {
2551 if (!Continuations::enabled()) return;
2552
2553 Label L_done;
2554 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2555 jccb(Assembler::belowEqual, L_done);
2556 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
2557 bind(L_done);
2558 }
2559
2560 void MacroAssembler::pop_cont_fastpath() {
2561 if (!Continuations::enabled()) return;
2562
2563 Label L_done;
2564 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2565 jccb(Assembler::below, L_done);
2566 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
2567 bind(L_done);
2568 }
2569
2570 #ifdef ASSERT
2571 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
2572 Label no_cont;
2573 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
2574 testl(cont, cont);
2575 jcc(Assembler::zero, no_cont);
2576 stop(name);
2577 bind(no_cont);
2578 }
2579 #endif
2580
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
2582 // we must set sp to zero to clear frame
2583 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
2584 // must clear fp, so that compiled frames are not confused; it is
2585 // possible that we need it only for debugging
2586 if (clear_fp) {
2587 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
2588 }
2589 // Always clear the pc because it could have been set by make_walkable()
2590 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
2591 vzeroupper();
2592 }
2593
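// Round reg up to a multiple of modulus (a power of two):
// e.g. modulus == 16, reg == 23:  (23 + 15) & -16 == 32.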
2594 void MacroAssembler::round_to(Register reg, int modulus) {
2595 addptr(reg, modulus - 1);
2596 andptr(reg, -modulus);
2597 }
2598
2599 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
2600 if (at_return) {
2601 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
2602 // we may safely use rsp instead to perform the stack watermark check.
2603 cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
2604 jcc(Assembler::above, slow_path);
2605 return;
2606 }
2607 testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2608 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
2609 }
2610
2611 // Calls to C land
2612 //
2613 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded
2614 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
2615 // has to be reset to 0. This is required to allow proper stack traversal.
2616 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2617 Register last_java_fp,
2618 address last_java_pc,
2619 Register rscratch) {
2620 vzeroupper();
2621 // determine last_java_sp register
2622 if (!last_java_sp->is_valid()) {
2623 last_java_sp = rsp;
2624 }
2625 // last_java_fp is optional
2626 if (last_java_fp->is_valid()) {
2627 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
2628 }
2629 // last_java_pc is optional
2630 if (last_java_pc != nullptr) {
2631 Address java_pc(r15_thread,
2632 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
2633 lea(java_pc, InternalAddress(last_java_pc), rscratch);
2634 }
2635 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
2636 }
2637
2638 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2639 Register last_java_fp,
2640 Label &L,
2641 Register scratch) {
2642 lea(scratch, L);
2643 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
2644 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
2645 }
2646
2647 void MacroAssembler::shlptr(Register dst, int imm8) {
2648 shlq(dst, imm8);
2649 }
2650
2651 void MacroAssembler::shrptr(Register dst, int imm8) {
2652 shrq(dst, imm8);
2653 }
2654
2655 void MacroAssembler::sign_extend_byte(Register reg) {
2656 movsbl(reg, reg); // movsxb
2657 }
2658
2659 void MacroAssembler::sign_extend_short(Register reg) {
2660 movswl(reg, reg); // movsxw
2661 }
2662
2663 void MacroAssembler::narrow_subword_type(Register reg, BasicType bt) {
2664 assert(is_subword_type(bt), "required");
2665 switch (bt) {
2666 case T_BOOLEAN: andl(reg, 1); break;
2667 case T_BYTE: movsbl(reg, reg); break;
2668 case T_CHAR: movzwl(reg, reg); break;
2669 case T_SHORT: movswl(reg, reg); break;
2670 default: ShouldNotReachHere();
2671 }
2672 }
2673
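// test against an immediate: when only the low 8 bits can be set, the byte
// form saves three immediate bytes and computes the same flags. The register
// variant additionally requires that the register's low byte be encodable.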
2674 void MacroAssembler::testl(Address dst, int32_t imm32) {
2675 if (imm32 >= 0 && is8bit(imm32)) {
2676 testb(dst, imm32);
2677 } else {
2678 Assembler::testl(dst, imm32);
2679 }
2680 }
2681
2682 void MacroAssembler::testl(Register dst, int32_t imm32) {
2683 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
2684 testb(dst, imm32);
2685 } else {
2686 Assembler::testl(dst, imm32);
2687 }
2688 }
2689
2690 void MacroAssembler::testl(Register dst, AddressLiteral src) {
2691 assert(always_reachable(src), "Address should be reachable");
2692 testl(dst, as_Address(src));
2693 }
2694
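// testq with a non-negative imm32: sign extension then equals zero extension,
// so the 32-bit test yields identical flags with a shorter encoding.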
2695 void MacroAssembler::testq(Address dst, int32_t imm32) {
2696 if (imm32 >= 0) {
2697 testl(dst, imm32);
2698 } else {
2699 Assembler::testq(dst, imm32);
2700 }
2701 }
2702
2703 void MacroAssembler::testq(Register dst, int32_t imm32) {
2704 if (imm32 >= 0) {
2705 testl(dst, imm32);
2706 } else {
2707 Assembler::testq(dst, imm32);
2708 }
2709 }
2710
2711 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
2712 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2713 Assembler::pcmpeqb(dst, src);
2714 }
2715
2716 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
2717 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2718 Assembler::pcmpeqw(dst, src);
2719 }
2720
2721 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
2722 assert((dst->encoding() < 16),"XMM register should be 0-15");
2723 Assembler::pcmpestri(dst, src, imm8);
2724 }
2725
2726 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
2727 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2728 Assembler::pcmpestri(dst, src, imm8);
2729 }
2730
2731 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2732 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2733 Assembler::pmovzxbw(dst, src);
2734 }
2735
2736 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
2737 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2738 Assembler::pmovzxbw(dst, src);
2739 }
2740
2741 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
2742 assert((src->encoding() < 16),"XMM register should be 0-15");
2743 Assembler::pmovmskb(dst, src);
2744 }
2745
2746 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
2747 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2748 Assembler::ptest(dst, src);
2749 }
2750
2751 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2752 assert(rscratch != noreg || always_reachable(src), "missing");
2753
2754 if (reachable(src)) {
2755 Assembler::sqrtss(dst, as_Address(src));
2756 } else {
2757 lea(rscratch, src);
2758 Assembler::sqrtss(dst, Address(rscratch, 0));
2759 }
2760 }
2761
2762 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2763 assert(rscratch != noreg || always_reachable(src), "missing");
2764
2765 if (reachable(src)) {
2766 Assembler::subsd(dst, as_Address(src));
2767 } else {
2768 lea(rscratch, src);
2769 Assembler::subsd(dst, Address(rscratch, 0));
2770 }
2771 }
2772
2773 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
2774 assert(rscratch != noreg || always_reachable(src), "missing");
2775
2776 if (reachable(src)) {
2777 Assembler::roundsd(dst, as_Address(src), rmode);
2778 } else {
2779 lea(rscratch, src);
2780 Assembler::roundsd(dst, Address(rscratch, 0), rmode);
2781 }
2782 }
2783
2784 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2785 assert(rscratch != noreg || always_reachable(src), "missing");
2786
2787 if (reachable(src)) {
2788 Assembler::subss(dst, as_Address(src));
2789 } else {
2790 lea(rscratch, src);
2791 Assembler::subss(dst, Address(rscratch, 0));
2792 }
2793 }
2794
2795 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2796 assert(rscratch != noreg || always_reachable(src), "missing");
2797
2798 if (reachable(src)) {
2799 Assembler::ucomisd(dst, as_Address(src));
2800 } else {
2801 lea(rscratch, src);
2802 Assembler::ucomisd(dst, Address(rscratch, 0));
2803 }
2804 }
2805
2806 void MacroAssembler::evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2807 assert(rscratch != noreg || always_reachable(src), "missing");
2808
2809 if (reachable(src)) {
2810 Assembler::evucomxsd(dst, as_Address(src));
2811 } else {
2812 lea(rscratch, src);
2813 Assembler::evucomxsd(dst, Address(rscratch, 0));
2814 }
2815 }
2816
2817 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2818 assert(rscratch != noreg || always_reachable(src), "missing");
2819
2820 if (reachable(src)) {
2821 Assembler::ucomiss(dst, as_Address(src));
2822 } else {
2823 lea(rscratch, src);
2824 Assembler::ucomiss(dst, Address(rscratch, 0));
2825 }
2826 }
2827
2828 void MacroAssembler::evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2829 assert(rscratch != noreg || always_reachable(src), "missing");
2830
2831 if (reachable(src)) {
2832 Assembler::evucomxss(dst, as_Address(src));
2833 } else {
2834 lea(rscratch, src);
2835 Assembler::evucomxss(dst, Address(rscratch, 0));
2836 }
2837 }
2838
2839 void MacroAssembler::evucomish(XMMRegister dst, AddressLiteral src, Register rscratch) {
2840 assert(rscratch != noreg || always_reachable(src), "missing");
2841
2842 if (reachable(src)) {
2843 Assembler::evucomish(dst, as_Address(src));
2844 } else {
2845 lea(rscratch, src);
2846 Assembler::evucomish(dst, Address(rscratch, 0));
2847 }
2848 }
2849
2850 void MacroAssembler::evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch) {
2851 assert(rscratch != noreg || always_reachable(src), "missing");
2852
2853 if (reachable(src)) {
2854 Assembler::evucomxsh(dst, as_Address(src));
2855 } else {
2856 lea(rscratch, src);
2857 Assembler::evucomxsh(dst, Address(rscratch, 0));
2858 }
2859 }
2860
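// xorpd/xorps cannot encode XMM16-31 without AVX512DQ+VL, so in that case
// fall back to the 512-bit integer vpxor (AVX512F), which is bit-identical.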
2861 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2862 assert(rscratch != noreg || always_reachable(src), "missing");
2863
2864 // Used in sign-bit flipping with aligned address.
2865 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2866
2867 if (UseAVX > 2 &&
2868 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2869 (dst->encoding() >= 16)) {
2870 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2871 } else if (reachable(src)) {
2872 Assembler::xorpd(dst, as_Address(src));
2873 } else {
2874 lea(rscratch, src);
2875 Assembler::xorpd(dst, Address(rscratch, 0));
2876 }
2877 }
2878
2879 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
2880 if (UseAVX > 2 &&
2881 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2882 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2883 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2884 } else {
2885 Assembler::xorpd(dst, src);
2886 }
2887 }
2888
2889 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
2890 if (UseAVX > 2 &&
2891 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2892 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2893 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2894 } else {
2895 Assembler::xorps(dst, src);
2896 }
2897 }
2898
2899 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
2900 assert(rscratch != noreg || always_reachable(src), "missing");
2901
2902 // Used in sign-bit flipping with aligned address.
2903 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2904
2905 if (UseAVX > 2 &&
2906 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2907 (dst->encoding() >= 16)) {
2908 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2909 } else if (reachable(src)) {
2910 Assembler::xorps(dst, as_Address(src));
2911 } else {
2912 lea(rscratch, src);
2913 Assembler::xorps(dst, Address(rscratch, 0));
2914 }
2915 }
2916
2917 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
2918 assert(rscratch != noreg || always_reachable(src), "missing");
2919
  // The non-AVX encoding requires a 16-byte aligned memory operand.
2921 bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
2922 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
2923 if (reachable(src)) {
2924 Assembler::pshufb(dst, as_Address(src));
2925 } else {
2926 lea(rscratch, src);
2927 Assembler::pshufb(dst, Address(rscratch, 0));
2928 }
2929 }
2930
2931 // AVX 3-operands instructions
2932
2933 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2934 assert(rscratch != noreg || always_reachable(src), "missing");
2935
2936 if (reachable(src)) {
2937 vaddsd(dst, nds, as_Address(src));
2938 } else {
2939 lea(rscratch, src);
2940 vaddsd(dst, nds, Address(rscratch, 0));
2941 }
2942 }
2943
2944 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2945 assert(rscratch != noreg || always_reachable(src), "missing");
2946
2947 if (reachable(src)) {
2948 vaddss(dst, nds, as_Address(src));
2949 } else {
2950 lea(rscratch, src);
2951 vaddss(dst, nds, Address(rscratch, 0));
2952 }
2953 }
2954
2955 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2956 assert(UseAVX > 0, "requires some form of AVX");
2957 assert(rscratch != noreg || always_reachable(src), "missing");
2958
2959 if (reachable(src)) {
2960 Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
2961 } else {
2962 lea(rscratch, src);
2963 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
2964 }
2965 }
2966
2967 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2968 assert(UseAVX > 0, "requires some form of AVX");
2969 assert(rscratch != noreg || always_reachable(src), "missing");
2970
2971 if (reachable(src)) {
2972 Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
2973 } else {
2974 lea(rscratch, src);
2975 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
2976 }
2977 }
2978
2979 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2980 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2981 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2982
2983 vandps(dst, nds, negate_field, vector_len, rscratch);
2984 }
2985
2986 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2987 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2988 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2989
2990 vandpd(dst, nds, negate_field, vector_len, rscratch);
2991 }
2992
2993 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
2994 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2995 Assembler::vpaddb(dst, nds, src, vector_len);
2996 }
2997
2998 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
2999 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3000 Assembler::vpaddb(dst, nds, src, vector_len);
3001 }
3002
3003 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3004 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3005 Assembler::vpaddw(dst, nds, src, vector_len);
3006 }
3007
3008 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3009 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3010 Assembler::vpaddw(dst, nds, src, vector_len);
3011 }
3012
3013 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3014 assert(rscratch != noreg || always_reachable(src), "missing");
3015
3016 if (reachable(src)) {
3017 Assembler::vpand(dst, nds, as_Address(src), vector_len);
3018 } else {
3019 lea(rscratch, src);
3020 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
3021 }
3022 }
3023
3024 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3025 assert(rscratch != noreg || always_reachable(src), "missing");
3026
3027 if (reachable(src)) {
3028 Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
3029 } else {
3030 lea(rscratch, src);
3031 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
3032 }
3033 }
3034
3035 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3036 assert(rscratch != noreg || always_reachable(src), "missing");
3037
3038 if (reachable(src)) {
3039 Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
3040 } else {
3041 lea(rscratch, src);
3042 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
3043 }
3044 }
3045
3046 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3047 assert(rscratch != noreg || always_reachable(src), "missing");
3048
3049 if (reachable(src)) {
3050 Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
3051 } else {
3052 lea(rscratch, src);
3053 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
3054 }
3055 }
3056
3057 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3058 assert(rscratch != noreg || always_reachable(src), "missing");
3059
3060 if (reachable(src)) {
3061 Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
3062 } else {
3063 lea(rscratch, src);
3064 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
3065 }
3066 }
3067
3068 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3069 assert(rscratch != noreg || always_reachable(src), "missing");
3070
3071 if (reachable(src)) {
3072 Assembler::vbroadcastss(dst, as_Address(src), vector_len);
3073 } else {
3074 lea(rscratch, src);
3075 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
3076 }
3077 }
3078
3079 // Vector float blend
3080 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3081 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3082 // WARN: Allow dst == (src1|src2), mask == scratch
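  // Emulation computes dst = (mask & src2) | (~mask & src1); the arithmetic
  // right shift below broadcasts each lane's sign bit to form the full mask.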
3083 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3084 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3085 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
3086 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3087 if (blend_emulation && scratch_available && dst_available) {
3088 if (compute_mask) {
3089 vpsrad(scratch, mask, 32, vector_len);
3090 mask = scratch;
3091 }
3092 if (dst == src1) {
3093 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3094 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3095 } else {
3096 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3097 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3098 }
3099 vpor(dst, dst, scratch, vector_len);
3100 } else {
3101 Assembler::vblendvps(dst, src1, src2, mask, vector_len);
3102 }
3103 }
3104
3105 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3106 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3107 // WARN: Allow dst == (src1|src2), mask == scratch
3108 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3109 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3110 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
3111 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3112 if (blend_emulation && scratch_available && dst_available) {
3113 if (compute_mask) {
3114 vpxor(scratch, scratch, scratch, vector_len);
3115 vpcmpgtq(scratch, scratch, mask, vector_len);
3116 mask = scratch;
3117 }
3118 if (dst == src1) {
      vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3120 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3121 } else {
3122 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3124 }
3125 vpor(dst, dst, scratch, vector_len);
3126 } else {
3127 Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
3128 }
3129 }
3130
3131 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3132 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3133 Assembler::vpcmpeqb(dst, nds, src, vector_len);
3134 }
3135
3136 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
3137 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3138 Assembler::vpcmpeqb(dst, src1, src2, vector_len);
3139 }
3140
3141 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3142 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3143 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3144 }
3145
3146 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3147 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3148 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3149 }
3150
3151 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3152 assert(rscratch != noreg || always_reachable(src), "missing");
3153
3154 if (reachable(src)) {
3155 Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
3156 } else {
3157 lea(rscratch, src);
3158 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
3159 }
3160 }
3161
3162 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3163 int comparison, bool is_signed, int vector_len, Register rscratch) {
3164 assert(rscratch != noreg || always_reachable(src), "missing");
3165
3166 if (reachable(src)) {
3167 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3168 } else {
3169 lea(rscratch, src);
3170 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3171 }
3172 }
3173
3174 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3175 int comparison, bool is_signed, int vector_len, Register rscratch) {
3176 assert(rscratch != noreg || always_reachable(src), "missing");
3177
3178 if (reachable(src)) {
3179 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3180 } else {
3181 lea(rscratch, src);
3182 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3183 }
3184 }
3185
3186 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3187 int comparison, bool is_signed, int vector_len, Register rscratch) {
3188 assert(rscratch != noreg || always_reachable(src), "missing");
3189
3190 if (reachable(src)) {
3191 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3192 } else {
3193 lea(rscratch, src);
3194 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3195 }
3196 }
3197
3198 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3199 int comparison, bool is_signed, int vector_len, Register rscratch) {
3200 assert(rscratch != noreg || always_reachable(src), "missing");
3201
3202 if (reachable(src)) {
3203 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3204 } else {
3205 lea(rscratch, src);
3206 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3207 }
3208 }
3209
3210 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
3211 if (width == Assembler::Q) {
3212 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
3213 } else {
3214 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
3215 }
3216 }
3217
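// Synthesize all comparisons from the two the ISA provides (eq, gt):
// neq = NOT eq, le = NOT gt, lt = gt with operands swapped, nlt = NOT lt,
// where NOT is a vpxor against all-ones built in xtmp.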
3218 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
3219 int eq_cond_enc = 0x29;
3220 int gt_cond_enc = 0x37;
3221 if (width != Assembler::Q) {
3222 eq_cond_enc = 0x74 + width;
3223 gt_cond_enc = 0x64 + width;
3224 }
3225 switch (cond) {
3226 case eq:
3227 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3228 break;
3229 case neq:
3230 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3231 vallones(xtmp, vector_len);
3232 vpxor(dst, xtmp, dst, vector_len);
3233 break;
3234 case le:
3235 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3236 vallones(xtmp, vector_len);
3237 vpxor(dst, xtmp, dst, vector_len);
3238 break;
3239 case nlt:
3240 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3241 vallones(xtmp, vector_len);
3242 vpxor(dst, xtmp, dst, vector_len);
3243 break;
3244 case lt:
3245 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3246 break;
3247 case nle:
3248 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3249 break;
3250 default:
3251 assert(false, "Should not reach here");
3252 }
3253 }
3254
3255 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
3256 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3257 Assembler::vpmovzxbw(dst, src, vector_len);
3258 }
3259
3260 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
3261 assert((src->encoding() < 16),"XMM register should be 0-15");
3262 Assembler::vpmovmskb(dst, src, vector_len);
3263 }
3264
3265 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3266 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3267 Assembler::vpmullw(dst, nds, src, vector_len);
3268 }
3269
3270 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3271 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3272 Assembler::vpmullw(dst, nds, src, vector_len);
3273 }
3274
3275 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3276 assert((UseAVX > 0), "AVX support is needed");
3277 assert(rscratch != noreg || always_reachable(src), "missing");
3278
3279 if (reachable(src)) {
3280 Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
3281 } else {
3282 lea(rscratch, src);
3283 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
3284 }
3285 }
3286
3287 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3288 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3289 Assembler::vpsubb(dst, nds, src, vector_len);
3290 }
3291
3292 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3293 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3294 Assembler::vpsubb(dst, nds, src, vector_len);
3295 }
3296
3297 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3298 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3299 Assembler::vpsubw(dst, nds, src, vector_len);
3300 }
3301
3302 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3303 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3304 Assembler::vpsubw(dst, nds, src, vector_len);
3305 }
3306
3307 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3308 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3309 Assembler::vpsraw(dst, nds, shift, vector_len);
3310 }
3311
3312 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3313 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3314 Assembler::vpsraw(dst, nds, shift, vector_len);
3315 }
3316
3317 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX-512");
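  // Without AVX512VL, EVEX-encoded instructions are only available at 512-bit
  // width, so widen the operation; callers are assumed not to rely on the
  // extra upper lanes this writes.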
3319 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3320 vector_len = 2;
3321 }
3322 Assembler::evpsraq(dst, nds, shift, vector_len);
3323 }
3324
3325 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX-512");
3327 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3328 vector_len = 2;
3329 }
3330 Assembler::evpsraq(dst, nds, shift, vector_len);
3331 }
3332
3333 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3334 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3335 Assembler::vpsrlw(dst, nds, shift, vector_len);
3336 }
3337
3338 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3339 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3340 Assembler::vpsrlw(dst, nds, shift, vector_len);
3341 }
3342
3343 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3344 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3345 Assembler::vpsllw(dst, nds, shift, vector_len);
3346 }
3347
3348 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3349 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3350 Assembler::vpsllw(dst, nds, shift, vector_len);
3351 }
3352
3353 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
3354 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
3355 Assembler::vptest(dst, src);
3356 }
3357
3358 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
3359 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3360 Assembler::punpcklbw(dst, src);
3361 }
3362
3363 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
3364 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
3365 Assembler::pshufd(dst, src, mode);
3366 }
3367
3368 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
3369 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3370 Assembler::pshuflw(dst, src, mode);
3371 }
3372
3373 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3374 assert(rscratch != noreg || always_reachable(src), "missing");
3375
3376 if (reachable(src)) {
3377 vandpd(dst, nds, as_Address(src), vector_len);
3378 } else {
3379 lea(rscratch, src);
3380 vandpd(dst, nds, Address(rscratch, 0), vector_len);
3381 }
3382 }
3383
3384 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3385 assert(rscratch != noreg || always_reachable(src), "missing");
3386
3387 if (reachable(src)) {
3388 vandps(dst, nds, as_Address(src), vector_len);
3389 } else {
3390 lea(rscratch, src);
3391 vandps(dst, nds, Address(rscratch, 0), vector_len);
3392 }
3393 }
3394
3395 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
3396 bool merge, int vector_len, Register rscratch) {
3397 assert(rscratch != noreg || always_reachable(src), "missing");
3398
3399 if (reachable(src)) {
3400 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
3401 } else {
3402 lea(rscratch, src);
3403 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
3404 }
3405 }
3406
3407 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3408 assert(rscratch != noreg || always_reachable(src), "missing");
3409
3410 if (reachable(src)) {
3411 vdivsd(dst, nds, as_Address(src));
3412 } else {
3413 lea(rscratch, src);
3414 vdivsd(dst, nds, Address(rscratch, 0));
3415 }
3416 }
3417
3418 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3419 assert(rscratch != noreg || always_reachable(src), "missing");
3420
3421 if (reachable(src)) {
3422 vdivss(dst, nds, as_Address(src));
3423 } else {
3424 lea(rscratch, src);
3425 vdivss(dst, nds, Address(rscratch, 0));
3426 }
3427 }
3428
3429 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3430 assert(rscratch != noreg || always_reachable(src), "missing");
3431
3432 if (reachable(src)) {
3433 vmulsd(dst, nds, as_Address(src));
3434 } else {
3435 lea(rscratch, src);
3436 vmulsd(dst, nds, Address(rscratch, 0));
3437 }
3438 }
3439
3440 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3441 assert(rscratch != noreg || always_reachable(src), "missing");
3442
3443 if (reachable(src)) {
3444 vmulss(dst, nds, as_Address(src));
3445 } else {
3446 lea(rscratch, src);
3447 vmulss(dst, nds, Address(rscratch, 0));
3448 }
3449 }
3450
3451 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3452 assert(rscratch != noreg || always_reachable(src), "missing");
3453
3454 if (reachable(src)) {
3455 vsubsd(dst, nds, as_Address(src));
3456 } else {
3457 lea(rscratch, src);
3458 vsubsd(dst, nds, Address(rscratch, 0));
3459 }
3460 }
3461
3462 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3463 assert(rscratch != noreg || always_reachable(src), "missing");
3464
3465 if (reachable(src)) {
3466 vsubss(dst, nds, as_Address(src));
3467 } else {
3468 lea(rscratch, src);
3469 vsubss(dst, nds, Address(rscratch, 0));
3470 }
3471 }
3472
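// Scalar negation via XOR: 'src' is expected to address a sign-bit mask, so
// XORing with it flips the sign of the low element. 128-bit width suffices
// for the scalar forms.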
3473 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3474 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3475 assert(rscratch != noreg || always_reachable(src), "missing");
3476
3477 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
3478 }
3479
3480 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3481 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3482 assert(rscratch != noreg || always_reachable(src), "missing");
3483
3484 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
3485 }
3486
3487 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3488 assert(rscratch != noreg || always_reachable(src), "missing");
3489
3490 if (reachable(src)) {
3491 vxorpd(dst, nds, as_Address(src), vector_len);
3492 } else {
3493 lea(rscratch, src);
3494 vxorpd(dst, nds, Address(rscratch, 0), vector_len);
3495 }
3496 }
3497
3498 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3499 assert(rscratch != noreg || always_reachable(src), "missing");
3500
3501 if (reachable(src)) {
3502 vxorps(dst, nds, as_Address(src), vector_len);
3503 } else {
3504 lea(rscratch, src);
3505 vxorps(dst, nds, Address(rscratch, 0), vector_len);
3506 }
3507 }
3508
3509 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3510 assert(rscratch != noreg || always_reachable(src), "missing");
3511
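  // 256-bit integer vpxor requires AVX2; on AVX1-only hardware fall back to
  // vxorpd, which is bitwise identical (at a possible domain-crossing cost).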
3512 if (UseAVX > 1 || (vector_len < 1)) {
3513 if (reachable(src)) {
3514 Assembler::vpxor(dst, nds, as_Address(src), vector_len);
3515 } else {
3516 lea(rscratch, src);
3517 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
3518 }
3519 } else {
3520 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
3521 }
3522 }
3523
3524 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3525 assert(rscratch != noreg || always_reachable(src), "missing");
3526
3527 if (reachable(src)) {
3528 Assembler::vpermd(dst, nds, as_Address(src), vector_len);
3529 } else {
3530 lea(rscratch, src);
3531 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
3532 }
3533 }
3534
3535 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
3536 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
3537 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
3538 // The inverted mask is sign-extended
3539 andptr(possibly_non_local, inverted_mask);
3540 }
3541
3542 void MacroAssembler::resolve_jobject(Register value,
3543 Register tmp) {
3544 Register thread = r15_thread;
3545 assert_different_registers(value, thread, tmp);
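  // JNI handles are tagged in their low bits: local handles are untagged,
  // while global and weak-global handles carry TypeTag bits that must be
  // subtracted off before the handle can be dereferenced.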
3546 Label done, tagged, weak_tagged;
3547 testptr(value, value);
3548 jcc(Assembler::zero, done); // Use null as-is.
3549 testptr(value, JNIHandles::tag_mask); // Test for tag.
3550 jcc(Assembler::notZero, tagged);
3551
3552 // Resolve local handle
3553 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
3554 verify_oop(value);
3555 jmp(done);
3556
3557 bind(tagged);
3558 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
3559 jcc(Assembler::notZero, weak_tagged);
3560
3561 // Resolve global handle
3562 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3563 verify_oop(value);
3564 jmp(done);
3565
3566 bind(weak_tagged);
3567 // Resolve jweak.
3568 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3569 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
3570 verify_oop(value);
3571
3572 bind(done);
3573 }
3574
3575 void MacroAssembler::resolve_global_jobject(Register value,
3576 Register tmp) {
3577 Register thread = r15_thread;
3578 assert_different_registers(value, thread, tmp);
3579 Label done;
3580
3581 testptr(value, value);
3582 jcc(Assembler::zero, done); // Use null as-is.
3583
3584 #ifdef ASSERT
3585 {
3586 Label valid_global_tag;
3587 testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
3588 jcc(Assembler::notZero, valid_global_tag);
3589 stop("non global jobject using resolve_global_jobject");
3590 bind(valid_global_tag);
3591 }
3592 #endif
3593
3594 // Resolve global handle
3595 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3596 verify_oop(value);
3597
3598 bind(done);
3599 }
3600
3601 void MacroAssembler::subptr(Register dst, int32_t imm32) {
3602 subq(dst, imm32);
3603 }
3604
// Force generation of a 4-byte immediate value even if it fits into 8 bits
3606 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
3607 subq_imm32(dst, imm32);
3608 }
3609
3610 void MacroAssembler::subptr(Register dst, Register src) {
3611 subq(dst, src);
3612 }
3613
3614 // C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1) {
    testb(dst, 0xff);
  } else if (sizeof(bool) == 2) {
    // testw implementation needed for two-byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4) {
    testl(dst, dst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}
3627
3628 void MacroAssembler::testptr(Register dst, Register src) {
3629 testq(dst, src);
3630 }
3631
3632 // Object / value buffer allocation...
3633 //
3634 // Kills klass and rsi on LP64
3635 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
3636 Register t1, Register t2,
3637 bool clear_fields, Label& alloc_failed)
3638 {
3639 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
3640 Register layout_size = t1;
3641 assert(new_obj == rax, "needs to be rax");
3642 assert_different_registers(klass, new_obj, t1, t2);
3643
3644 // get instance_size in InstanceKlass (scaled to a count of bytes)
3645 movl(layout_size, Address(klass, Klass::layout_helper_offset()));
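  // For instance klasses, layout_helper encodes the instance size in bytes,
  // with the low-order _lh_instance_slow_path_bit set when allocation must
  // take the slow path, e.g. for finalizable classes.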
3646 // test to see if it is malformed in some way
3647 testl(layout_size, Klass::_lh_instance_slow_path_bit);
3648 jcc(Assembler::notZero, slow_case_no_pop);
3649
3650 // Allocate the instance:
3651 // If TLAB is enabled:
3652 // Try to allocate in the TLAB.
3653 // If fails, go to the slow path.
3654 // Else If inline contiguous allocations are enabled:
3655 // Try to allocate in eden.
3656 // If fails due to heap end, go to slow path.
3657 //
3658 // If TLAB is enabled OR inline contiguous is enabled:
3659 // Initialize the allocation.
3660 // Exit.
3661 //
3662 // Go to slow path.
3663
3664 push(klass);
3665 if (UseTLAB) {
3666 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
3667 if (ZeroTLAB || (!clear_fields)) {
3668 // the fields have been already cleared
3669 jmp(initialize_header);
3670 } else {
3671 // initialize both the header and fields
3672 jmp(initialize_object);
3673 }
3674 } else {
3675 jmp(slow_case);
3676 }
3677
  // If UseTLAB is true, the object was allocated above and still needs to be
  // initialized; otherwise the code above has already jumped to the slow path.
3680 if (UseTLAB) {
3681 if (clear_fields) {
3682 // The object is initialized before the header. If the object size is
3683 // zero, go directly to the header initialization.
3684 bind(initialize_object);
3685 if (UseCompactObjectHeaders) {
3686 assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
3687 decrement(layout_size, oopDesc::base_offset_in_bytes());
3688 } else {
3689 decrement(layout_size, sizeof(oopDesc));
3690 }
3691 jcc(Assembler::zero, initialize_header);
3692
      // Divide the remaining size by 8, setting the carry flag if it was odd.
      Register zero = klass;
      xorl(zero, zero); // use zero reg to clear memory (shorter code)
      shrl(layout_size, LogBytesPerLong); // divide by BytesPerLong and set carry flag if odd
3698
3699 #ifdef ASSERT
3700 // make sure instance_size was multiple of 8
3701 Label L;
      // Ignore the partial-flag stall after shrl() since this is debug-only code
3703 jcc(Assembler::carryClear, L);
3704 stop("object size is not multiple of 2 - adjust this code");
3705 bind(L);
3706 // must be > 0, no extra check needed here
3707 #endif
3708
3709 // initialize remaining object fields: instance_size was a multiple of 8
3710 {
3711 Label loop;
3712 bind(loop);
3713 int header_size_bytes = oopDesc::header_size() * HeapWordSize;
3714 assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
3715 movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
3716 decrement(layout_size);
3717 jcc(Assembler::notZero, loop);
3718 }
3719 } // clear_fields
3720
3721 // initialize object header only.
3722 bind(initialize_header);
3723 if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
3724 pop(klass);
3725 Register mark_word = t2;
3726 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
3727 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
3728 } else {
3729 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
3730 (intptr_t)markWord::prototype().value()); // header
3731 pop(klass); // get saved klass back in the register.
3732 }
3733 if (!UseCompactObjectHeaders) {
3734 xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
3735 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops
3736 movptr(t2, klass); // preserve klass
3737 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed
3738 }
3739 jmp(done);
3740 }
3741
3742 bind(slow_case);
3743 pop(klass);
3744 bind(slow_case_no_pop);
3745 jmp(alloc_failed);
3746
3747 bind(done);
3748 }
3749
3750 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
3751 void MacroAssembler::tlab_allocate(Register obj,
3752 Register var_size_in_bytes,
3753 int con_size_in_bytes,
3754 Register t1,
3755 Register t2,
3756 Label& slow_case) {
3757 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3758 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
3759 }
3760
3761 RegSet MacroAssembler::call_clobbered_gp_registers() {
3762 RegSet regs;
3763 regs += RegSet::of(rax, rcx, rdx);
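  // rsi and rdi are caller-saved in the System V ABI but callee-saved in the
  // Windows x64 ABI, so they are only call-clobbered on non-Windows builds.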
3764 #ifndef _WINDOWS
3765 regs += RegSet::of(rsi, rdi);
3766 #endif
3767 regs += RegSet::range(r8, r11);
3768 if (UseAPX) {
3769 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
3770 }
3771 return regs;
3772 }
3773
3774 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
3775 int num_xmm_registers = XMMRegister::available_xmm_registers();
3776 #if defined(_WINDOWS)
3777 XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
3778 if (num_xmm_registers > 16) {
3779 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
3780 }
3781 return result;
3782 #else
3783 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
3784 #endif
3785 }
3786
3787 // C1 only ever uses the first double/float of the XMM register.
3788 static int xmm_save_size() { return sizeof(double); }
3789
3790 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3791 masm->movdbl(Address(rsp, offset), reg);
3792 }
3793
3794 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3795 masm->movdbl(reg, Address(rsp, offset));
3796 }
3797
3798 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
3799 bool save_fpu, int& gp_area_size, int& xmm_area_size) {
3800
3801 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
3802 StackAlignmentInBytes);
3803 xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
3804
3805 return gp_area_size + xmm_area_size;
3806 }
3807
3808 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
3809 block_comment("push_call_clobbered_registers start");
3810 // Regular registers
3811 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
3812
3813 int gp_area_size;
3814 int xmm_area_size;
3815 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
3816 gp_area_size, xmm_area_size);
3817 subptr(rsp, total_save_size);
3818
3819 push_set(gp_registers_to_push, 0);
3820
3821 if (save_fpu) {
3822 push_set(call_clobbered_xmm_registers(), gp_area_size);
3823 }
3824
3825 block_comment("push_call_clobbered_registers end");
3826 }
3827
3828 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
3829 block_comment("pop_call_clobbered_registers start");
3830
3831 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
3832
3833 int gp_area_size;
3834 int xmm_area_size;
3835 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
3836 gp_area_size, xmm_area_size);
3837
3838 if (restore_fpu) {
3839 pop_set(call_clobbered_xmm_registers(), gp_area_size);
3840 }
3841
3842 pop_set(gp_registers_to_pop, 0);
3843
3844 addptr(rsp, total_save_size);
3845
3846 vzeroupper();
3847
3848 block_comment("pop_call_clobbered_registers end");
3849 }
3850
3851 void MacroAssembler::push_set(XMMRegSet set, int offset) {
3852 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
3853 int spill_offset = offset;
3854
3855 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
3856 save_xmm_register(this, spill_offset, *it);
3857 spill_offset += xmm_save_size();
3858 }
3859 }
3860
3861 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
3862 int restore_size = set.size() * xmm_save_size();
3863 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
3864
3865 int restore_offset = offset + restore_size - xmm_save_size();
3866
3867 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
3868 restore_xmm_register(this, restore_offset, *it);
3869 restore_offset -= xmm_save_size();
3870 }
3871 }
3872
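// Spill the registers in 'set' to the stack. If offset == -1, aligned stack
// space is allocated here; otherwise the caller has already reserved the
// space and 'offset' locates the start of the spill area within it.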
3873 void MacroAssembler::push_set(RegSet set, int offset) {
3874 int spill_offset;
3875 if (offset == -1) {
3876 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3877 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
3878 subptr(rsp, aligned_size);
3879 spill_offset = 0;
3880 } else {
3881 spill_offset = offset;
3882 }
3883
3884 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
3885 movptr(Address(rsp, spill_offset), *it);
3886 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3887 }
3888 }
3889
3890 void MacroAssembler::pop_set(RegSet set, int offset) {
3891
3892 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3893 int restore_size = set.size() * gp_reg_size;
3894 int aligned_size = align_up(restore_size, StackAlignmentInBytes);
3895
3896 int restore_offset;
3897 if (offset == -1) {
3898 restore_offset = restore_size - gp_reg_size;
3899 } else {
3900 restore_offset = offset + restore_size - gp_reg_size;
3901 }
3902 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
3903 movptr(*it, Address(rsp, restore_offset));
3904 restore_offset -= gp_reg_size;
3905 }
3906
3907 if (offset == -1) {
3908 addptr(rsp, aligned_size);
3909 }
3910 }
3911
// Preserves the contents of address; destroys the contents of length_in_bytes and temp.
3913 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
3914 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
3915 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
3916 Label done;
3917
3918 testptr(length_in_bytes, length_in_bytes);
3919 jcc(Assembler::zero, done);
3920
  // Divide index by BytesPerWord so it counts words rather than bytes.
  // Note: for the remaining code to work, index must be a multiple of BytesPerWord.
3923 #ifdef ASSERT
3924 {
3925 Label L;
3926 testptr(length_in_bytes, BytesPerWord - 1);
3927 jcc(Assembler::zero, L);
3928 stop("length must be a multiple of BytesPerWord");
3929 bind(L);
3930 }
3931 #endif
3932 Register index = length_in_bytes;
3933 xorptr(temp, temp); // use _zero reg to clear memory (shorter code)
3934 if (UseIncDec) {
    shrptr(index, 3); // divide by BytesPerWord (8) and set carry flag if bit 2 was set
3936 } else {
3937 shrptr(index, 2); // use 2 instructions to avoid partial flag stall
3938 shrptr(index, 1);
3939 }
3940
  // zero the remaining memory, one word per iteration; index now counts words
3942 {
3943 Label loop;
3944 bind(loop);
3945 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
3946 decrement(index);
3947 jcc(Assembler::notZero, loop);
3948 }
3949
3950 bind(done);
3951 }
3952
3953 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
3954 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
3955 #ifdef ASSERT
3956 {
3957 Label done;
3958 cmpptr(layout_info, 0);
3959 jcc(Assembler::notEqual, done);
3960 stop("inline_layout_info_array is null");
3961 bind(done);
3962 }
3963 #endif
3964
3965 InlineLayoutInfo array[2];
3966 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
3967 if (is_power_of_2(size)) {
3968 shll(index, log2i_exact(size)); // Scale index by power of 2
3969 } else {
3970 imull(index, index, size); // Scale the index to be the entry index * array_element_size
3971 }
3972 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
3973 }
3974
3975 // Look up the method for a megamorphic invokeinterface call.
3976 // The target method is determined by <intf_klass, itable_index>.
3977 // The receiver klass is in recv_klass.
3978 // On success, the result will be in method_result, and execution falls through.
3979 // On failure, execution transfers to the given label.
3980 void MacroAssembler::lookup_interface_method(Register recv_klass,
3981 Register intf_klass,
3982 RegisterOrConstant itable_index,
3983 Register method_result,
3984 Register scan_temp,
3985 Label& L_no_such_interface,
3986 bool return_method) {
3987 assert_different_registers(recv_klass, intf_klass, scan_temp);
3988 assert_different_registers(method_result, intf_klass, scan_temp);
3989 assert(recv_klass != method_result || !return_method,
3990 "recv_klass can be destroyed when method isn't needed");
3991
3992 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
3993 "caller must use same register for non-constant itable index as for method");
3994
3995 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
3996 int vtable_base = in_bytes(Klass::vtable_start_offset());
3997 int itentry_off = in_bytes(itableMethodEntry::method_offset());
3998 int scan_step = itableOffsetEntry::size() * wordSize;
3999 int vte_size = vtableEntry::size_in_bytes();
4000 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4001 assert(vte_size == wordSize, "else adjust times_vte_scale");
4002
4003 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
4004
4005 // Could store the aligned, prescaled offset in the klass.
4006 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
4007
4008 if (return_method) {
4009 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
4010 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4011 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
4012 }
4013
4014 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
4015 // if (scan->interface() == intf) {
4016 // result = (klass + scan->offset() + itable_index);
4017 // }
4018 // }
4019 Label search, found_method;
4020
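  // The scan loop is peeled once: the first copy branches out on a hit, which
  // lets the second copy's test be inverted so that a hit falls straight
  // through to found_method while a miss loops back to search.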
4021 for (int peel = 1; peel >= 0; peel--) {
4022 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
4023 cmpptr(intf_klass, method_result);
4024
4025 if (peel) {
4026 jccb(Assembler::equal, found_method);
4027 } else {
4028 jccb(Assembler::notEqual, search);
4029 // (invert the test to fall through to found_method...)
4030 }
4031
4032 if (!peel) break;
4033
4034 bind(search);
4035
4036 // Check that the previous entry is non-null. A null entry means that
4037 // the receiver class doesn't implement the interface, and wasn't the
4038 // same as when the caller was compiled.
4039 testptr(method_result, method_result);
4040 jcc(Assembler::zero, L_no_such_interface);
4041 addptr(scan_temp, scan_step);
4042 }
4043
4044 bind(found_method);
4045
4046 if (return_method) {
4047 // Got a hit.
4048 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
4049 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
4050 }
4051 }
4052
4053 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
4054 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
4055 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
4056 // The target method is determined by <holder_klass, itable_index>.
4057 // The receiver klass is in recv_klass.
4058 // On success, the result will be in method_result, and execution falls through.
4059 // On failure, execution transfers to the given label.
4060 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
4061 Register holder_klass,
4062 Register resolved_klass,
4063 Register method_result,
4064 Register scan_temp,
4065 Register temp_reg2,
4066 Register receiver,
4067 int itable_index,
4068 Label& L_no_such_interface) {
4069 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
4070 Register temp_itbl_klass = method_result;
  Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register when no second temp register is supplied
4072
4073 int vtable_base = in_bytes(Klass::vtable_start_offset());
4074 int itentry_off = in_bytes(itableMethodEntry::method_offset());
4075 int scan_step = itableOffsetEntry::size() * wordSize;
4076 int vte_size = vtableEntry::size_in_bytes();
4077 int ioffset = in_bytes(itableOffsetEntry::interface_offset());
4078 int ooffset = in_bytes(itableOffsetEntry::offset_offset());
4079 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4080 assert(vte_size == wordSize, "adjust times_vte_scale");
4081
4082 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
4083
4084 // temp_itbl_klass = recv_klass.itable[0]
4085 // scan_temp = &recv_klass.itable[0] + step
4086 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
4087 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
4088 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
4089 xorptr(temp_reg, temp_reg);
4090
4091 // Initial checks:
4092 // - if (holder_klass != resolved_klass), go to "scan for resolved"
4093 // - if (itable[0] == 0), no such interface
4094 // - if (itable[0] == holder_klass), shortcut to "holder found"
4095 cmpptr(holder_klass, resolved_klass);
4096 jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
4097 testptr(temp_itbl_klass, temp_itbl_klass);
4098 jccb(Assembler::zero, L_no_such_interface);
4099 cmpptr(holder_klass, temp_itbl_klass);
4100 jccb(Assembler::equal, L_holder_found);
4101
4102 // Loop: Look for holder_klass record in itable
4103 // do {
4104 // tmp = itable[index];
4105 // index += step;
4106 // if (tmp == holder_klass) {
4107 // goto L_holder_found; // Found!
4108 // }
4109 // } while (tmp != 0);
4110 // goto L_no_such_interface // Not found.
4111 Label L_scan_holder;
4112 bind(L_scan_holder);
4113 movptr(temp_itbl_klass, Address(scan_temp, 0));
4114 addptr(scan_temp, scan_step);
4115 cmpptr(holder_klass, temp_itbl_klass);
4116 jccb(Assembler::equal, L_holder_found);
4117 testptr(temp_itbl_klass, temp_itbl_klass);
4118 jccb(Assembler::notZero, L_scan_holder);
4119
4120 jmpb(L_no_such_interface);
4121
4122 // Loop: Look for resolved_class record in itable
4123 // do {
4124 // tmp = itable[index];
4125 // index += step;
4126 // if (tmp == holder_klass) {
4127 // // Also check if we have met a holder klass
4128 // holder_tmp = itable[index-step-ioffset];
4129 // }
4130 // if (tmp == resolved_klass) {
4131 // goto L_resolved_found; // Found!
4132 // }
4133 // } while (tmp != 0);
4134 // goto L_no_such_interface // Not found.
4135 //
4136 Label L_loop_scan_resolved;
4137 bind(L_loop_scan_resolved);
4138 movptr(temp_itbl_klass, Address(scan_temp, 0));
4139 addptr(scan_temp, scan_step);
4140 bind(L_loop_scan_resolved_entry);
4141 cmpptr(holder_klass, temp_itbl_klass);
4142 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4143 cmpptr(resolved_klass, temp_itbl_klass);
4144 jccb(Assembler::equal, L_resolved_found);
4145 testptr(temp_itbl_klass, temp_itbl_klass);
4146 jccb(Assembler::notZero, L_loop_scan_resolved);
4147
4148 jmpb(L_no_such_interface);
4149
4150 Label L_ready;
4151
4152 // See if we already have a holder klass. If not, go and scan for it.
4153 bind(L_resolved_found);
4154 testptr(temp_reg, temp_reg);
4155 jccb(Assembler::zero, L_scan_holder);
4156 jmpb(L_ready);
4157
4158 bind(L_holder_found);
4159 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4160
4161 // Finally, temp_reg contains holder_klass vtable offset
4162 bind(L_ready);
4163 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  if (temp_reg2 == noreg) { // recv_klass was clobbered above while doubling as temp_reg
4165 load_klass(scan_temp, receiver, noreg);
4166 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4167 } else {
4168 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4169 }
4170 }
4171
4172
4173 // virtual method calling
4174 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4175 RegisterOrConstant vtable_index,
4176 Register method_result) {
4177 const ByteSize base = Klass::vtable_start_offset();
4178 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4179 Address vtable_entry_addr(recv_klass,
4180 vtable_index, Address::times_ptr,
4181 base + vtableEntry::method_offset());
4182 movptr(method_result, vtable_entry_addr);
4183 }
4184
4185
4186 void MacroAssembler::check_klass_subtype(Register sub_klass,
4187 Register super_klass,
4188 Register temp_reg,
4189 Label& L_success) {
4190 Label L_failure;
4191 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
4192 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
4193 bind(L_failure);
4194 }
4195
4196
4197 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4198 Register super_klass,
4199 Register temp_reg,
4200 Label* L_success,
4201 Label* L_failure,
4202 Label* L_slow_path,
4203 RegisterOrConstant super_check_offset) {
4204 assert_different_registers(sub_klass, super_klass, temp_reg);
4205 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4206 if (super_check_offset.is_register()) {
4207 assert_different_registers(sub_klass, super_klass,
4208 super_check_offset.as_register());
4209 } else if (must_load_sco) {
4210 assert(temp_reg != noreg, "supply either a temp or a register offset");
4211 }
4212
4213 Label L_fallthrough;
4214 int label_nulls = 0;
4215 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4216 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4217 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
4218 assert(label_nulls <= 1, "at most one null in the batch");
4219
4220 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4221 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4222 Address super_check_offset_addr(super_klass, sco_offset);
4223
4224 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4225 // range of a jccb. If this routine grows larger, reconsider at
4226 // least some of these.
4227 #define local_jcc(assembler_cond, label) \
4228 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4229 else jcc( assembler_cond, label) /*omit semi*/
4230
4231 // Hacked jmp, which may only be used just before L_fallthrough.
4232 #define final_jmp(label) \
4233 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4234 else jmp(label) /*omit semi*/
4235
4236 // If the pointers are equal, we are done (e.g., String[] elements).
4237 // This self-check enables sharing of secondary supertype arrays among
4238 // non-primary types such as array-of-interface. Otherwise, each such
4239 // type would need its own customized SSA.
4240 // We move this check to the front of the fast path because many
4241 // type checks are in fact trivially successful in this manner,
4242 // so we get a nicely predicted branch right at the start of the check.
4243 cmpptr(sub_klass, super_klass);
4244 local_jcc(Assembler::equal, *L_success);
4245
4246 // Check the supertype display:
4247 if (must_load_sco) {
4248 // Positive movl does right thing on LP64.
4249 movl(temp_reg, super_check_offset_addr);
4250 super_check_offset = RegisterOrConstant(temp_reg);
4251 }
4252 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4253 cmpptr(super_klass, super_check_addr); // load displayed supertype
4254
4255 // This check has worked decisively for primary supers.
4256 // Secondary supers are sought in the super_cache ('super_cache_addr').
4257 // (Secondary supers are interfaces and very deeply nested subtypes.)
4258 // This works in the same check above because of a tricky aliasing
4259 // between the super_cache and the primary super display elements.
4260 // (The 'super_check_addr' can address either, as the case requires.)
4261 // Note that the cache is updated below if it does not help us find
4262 // what we need immediately.
4263 // So if it was a primary super, we can just fail immediately.
4264 // Otherwise, it's the slow path for us (no success at this point).
4265
4266 if (super_check_offset.is_register()) {
4267 local_jcc(Assembler::equal, *L_success);
4268 cmpl(super_check_offset.as_register(), sc_offset);
4269 if (L_failure == &L_fallthrough) {
4270 local_jcc(Assembler::equal, *L_slow_path);
4271 } else {
4272 local_jcc(Assembler::notEqual, *L_failure);
4273 final_jmp(*L_slow_path);
4274 }
4275 } else if (super_check_offset.as_constant() == sc_offset) {
4276 // Need a slow path; fast failure is impossible.
4277 if (L_slow_path == &L_fallthrough) {
4278 local_jcc(Assembler::equal, *L_success);
4279 } else {
4280 local_jcc(Assembler::notEqual, *L_slow_path);
4281 final_jmp(*L_success);
4282 }
4283 } else {
4284 // No slow path; it's a fast decision.
4285 if (L_failure == &L_fallthrough) {
4286 local_jcc(Assembler::equal, *L_success);
4287 } else {
4288 local_jcc(Assembler::notEqual, *L_failure);
4289 final_jmp(*L_success);
4290 }
4291 }
4292
4293 bind(L_fallthrough);
4294
4295 #undef local_jcc
4296 #undef final_jmp
4297 }
4298
4299
4300 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
4301 Register super_klass,
4302 Register temp_reg,
4303 Register temp2_reg,
4304 Label* L_success,
4305 Label* L_failure,
4306 bool set_cond_codes) {
4307 assert_different_registers(sub_klass, super_klass, temp_reg);
4308 if (temp2_reg != noreg)
4309 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
4310 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
4311
4312 Label L_fallthrough;
4313 int label_nulls = 0;
4314 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4315 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4316 assert(label_nulls <= 1, "at most one null in the batch");
4317
4318 // a couple of useful fields in sub_klass:
4319 int ss_offset = in_bytes(Klass::secondary_supers_offset());
4320 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4321 Address secondary_supers_addr(sub_klass, ss_offset);
4322 Address super_cache_addr( sub_klass, sc_offset);
4323
4324 // Do a linear scan of the secondary super-klass chain.
4325 // This code is rarely used, so simplicity is a virtue here.
4326 // The repne_scan instruction uses fixed registers, which we must spill.
4327 // Don't worry too much about pre-existing connections with the input regs.
4328
4329 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
4330 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
4331
4332 // Get super_klass value into rax (even if it was in rdi or rcx).
4333 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
4334 if (super_klass != rax) {
4335 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
4336 mov(rax, super_klass);
4337 }
4338 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
4339 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
4340
4341 #ifndef PRODUCT
4342 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
4343 ExternalAddress pst_counter_addr((address) pst_counter);
4344 lea(rcx, pst_counter_addr);
4345 incrementl(Address(rcx, 0));
4346 #endif //PRODUCT
4347
4348 // We will consult the secondary-super array.
4349 movptr(rdi, secondary_supers_addr);
4350 // Load the array length. (Positive movl does right thing on LP64.)
4351 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
4352 // Skip to start of data.
4353 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
4354
  // Scan RCX words at [RDI] for an occurrence of RAX.
  // Set NZ/Z based on last compare.
  // 'repne' itself does not modify the flags, so if RCX == 0 they are left
  // unchanged (only the repeated scas instruction sets them).
  // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
4360
4361 testptr(rax,rax); // Set Z = 0
4362 repne_scan();
4363
4364 // Unspill the temp. registers:
4365 if (pushed_rdi) pop(rdi);
4366 if (pushed_rcx) pop(rcx);
4367 if (pushed_rax) pop(rax);
4368
4369 if (set_cond_codes) {
4370 // Special hack for the AD files: rdi is guaranteed non-zero.
4371 assert(!pushed_rdi, "rdi must be left non-null");
4372 // Also, the condition codes are properly set Z/NZ on succeed/failure.
4373 }
4374
4375 if (L_failure == &L_fallthrough)
4376 jccb(Assembler::notEqual, *L_failure);
4377 else jcc(Assembler::notEqual, *L_failure);
4378
4379 // Success. Cache the super we found and proceed in triumph.
4380 movptr(super_cache_addr, super_klass);
4381
4382 if (L_success != &L_fallthrough) {
4383 jmp(*L_success);
4384 }
4385
4386 #undef IS_A_TEMP
4387
4388 bind(L_fallthrough);
4389 }
4390
4391 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4392 Register super_klass,
4393 Register temp_reg,
4394 Register temp2_reg,
4395 Label* L_success,
4396 Label* L_failure,
4397 bool set_cond_codes) {
4398 assert(set_cond_codes == false, "must be false on 64-bit x86");
4399 check_klass_subtype_slow_path
4400 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
4401 L_success, L_failure);
4402 }
4403
4404 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4405 Register super_klass,
4406 Register temp_reg,
4407 Register temp2_reg,
4408 Register temp3_reg,
4409 Register temp4_reg,
4410 Label* L_success,
4411 Label* L_failure) {
4412 if (UseSecondarySupersTable) {
4413 check_klass_subtype_slow_path_table
4414 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
4415 L_success, L_failure);
4416 } else {
4417 check_klass_subtype_slow_path_linear
4418 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
4419 }
4420 }
4421
4422 Register MacroAssembler::allocate_if_noreg(Register r,
4423 RegSetIterator<Register> &available_regs,
                                           RegSet &regs_to_push) {
4425 if (!r->is_valid()) {
4426 r = *available_regs++;
4427 regs_to_push += r;
4428 }
4429 return r;
4430 }
4431
4432 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
4433 Register super_klass,
4434 Register temp_reg,
4435 Register temp2_reg,
4436 Register temp3_reg,
4437 Register result_reg,
4438 Label* L_success,
4439 Label* L_failure) {
4440 // NB! Callers may assume that, when temp2_reg is a valid register,
4441 // this code sets it to a nonzero value.
4442 bool temp2_reg_was_valid = temp2_reg->is_valid();
4443
4444 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
4445
4446 Label L_fallthrough;
4447 int label_nulls = 0;
4448 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4449 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4450 assert(label_nulls <= 1, "at most one null in the batch");
4451
4452 BLOCK_COMMENT("check_klass_subtype_slow_path_table");
4453
4454 RegSetIterator<Register> available_regs
4455 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
4456
4457 RegSet pushed_regs;
4458
4459 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
4460 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
4461 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
4462 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
4463 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
4464
4465 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
4466
4467 {
4468
4469 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
4470 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
4471 subptr(rsp, aligned_size);
4472 push_set(pushed_regs, 0);
4473
4474 lookup_secondary_supers_table_var(sub_klass,
4475 super_klass,
4476 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
4477 cmpq(result_reg, 0);
4478
4479 // Unspill the temp. registers:
4480 pop_set(pushed_regs, 0);
4481 // Increment SP but do not clobber flags.
4482 lea(rsp, Address(rsp, aligned_size));
4483 }
4484
4485 if (temp2_reg_was_valid) {
4486 movq(temp2_reg, 1);
4487 }
4488
4489 jcc(Assembler::notEqual, *L_failure);
4490
4491 if (L_success != &L_fallthrough) {
4492 jmp(*L_success);
4493 }
4494
4495 bind(L_fallthrough);
4496 }
4497
4498 // population_count variant for running without the POPCNT
4499 // instruction, which was introduced with SSE4.2 in 2008.
4500 void MacroAssembler::population_count(Register dst, Register src,
4501 Register scratch1, Register scratch2) {
4502 assert_different_registers(src, scratch1, scratch2);
4503 if (UsePopCountInstruction) {
4504 Assembler::popcntq(dst, src);
4505 } else {
4507 assert_different_registers(dst, scratch1, scratch2);
4508 Label loop, done;
4509
4510 mov(scratch1, src);
4511 // dst = 0;
4512 // while(scratch1 != 0) {
4513 // dst++;
4514 // scratch1 &= (scratch1 - 1);
4515 // }
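    // (Kernighan's method: scratch1 & (scratch1 - 1) clears the lowest set
    // bit, so the loop iterates once per set bit.)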
4516 xorl(dst, dst);
4517 testq(scratch1, scratch1);
4518 jccb(Assembler::equal, done);
4519 {
4520 bind(loop);
4521 incq(dst);
4522 movq(scratch2, scratch1);
4523 decq(scratch2);
4524 andq(scratch1, scratch2);
4525 jccb(Assembler::notEqual, loop);
4526 }
4527 bind(done);
4528 }
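  // In debug builds, clobber the scratch registers with a recognizable
  // pattern so callers cannot silently depend on their contents.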
4529 #ifdef ASSERT
4530 mov64(scratch1, 0xCafeBabeDeadBeef);
4531 movq(scratch2, scratch1);
4532 #endif
4533 }
4534
4535 // Ensure that the inline code and the stub are using the same registers.
4536 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
4537 do { \
4538 assert(r_super_klass == rax, "mismatch"); \
4539 assert(r_array_base == rbx, "mismatch"); \
4540 assert(r_array_length == rcx, "mismatch"); \
4541 assert(r_array_index == rdx, "mismatch"); \
4542 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \
4543 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \
4544 assert(result == rdi || result == noreg, "mismatch"); \
4545 } while(0)
4546
4547 // Versions of salq and rorq that don't need count to be in rcx
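// If the count register is not rcx, it is exchanged with rcx around the
// shift; since xchg is its own inverse, both registers end up restored.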
4548
4549 void MacroAssembler::salq(Register dest, Register count) {
4550 if (count == rcx) {
4551 Assembler::salq(dest);
4552 } else {
4553 assert_different_registers(rcx, dest);
4554 xchgq(rcx, count);
4555 Assembler::salq(dest);
4556 xchgq(rcx, count);
4557 }
4558 }
4559
4560 void MacroAssembler::rorq(Register dest, Register count) {
4561 if (count == rcx) {
4562 Assembler::rorq(dest);
4563 } else {
4564 assert_different_registers(rcx, dest);
4565 xchgq(rcx, count);
4566 Assembler::rorq(dest);
4567 xchgq(rcx, count);
4568 }
4569 }
4570
4573 // At runtime, return 0 in result if r_super_klass is a superclass of
4574 // r_sub_klass, otherwise return nonzero. Use this if you know the
4575 // super_klass_slot of the class you're looking for. This is always
4576 // the case for instanceof and checkcast.
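//
// The secondary supers table is a hashed view of the secondary supers array:
// each super klass hashes to one of 64 slots, the per-klass 64-bit bitmap
// records which slots are occupied, and the array stores only the occupied
// slots in slot order. The popcount of the bitmap bits at and below a given
// slot therefore yields that slot's (1-based) index into the array.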
4577 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
4578 Register r_super_klass,
4579 Register temp1,
4580 Register temp2,
4581 Register temp3,
4582 Register temp4,
4583 Register result,
4584 u1 super_klass_slot) {
4585 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4586
4587 Label L_fallthrough, L_success, L_failure;
4588
4589 BLOCK_COMMENT("lookup_secondary_supers_table {");
4590
4591 const Register
4592 r_array_index = temp1,
4593 r_array_length = temp2,
4594 r_array_base = temp3,
4595 r_bitmap = temp4;
4596
4597 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
4598
4599 xorq(result, result); // = 0
4600
4601 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4602 movq(r_array_index, r_bitmap);
4603
4604 // First check the bitmap to see if super_klass might be present. If
4605 // the bit is zero, we are certain that super_klass is not one of
4606 // the secondary supers.
4607 u1 bit = super_klass_slot;
4608 {
    // NB: If the count in an x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
4611 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
4612 if (shift_count != 0) {
4613 salq(r_array_index, shift_count);
4614 } else {
4615 testq(r_array_index, r_array_index);
4616 }
4617 }
4618 // We test the MSB of r_array_index, i.e. its sign bit
4619 jcc(Assembler::positive, L_failure);
4620
4621 // Get the first array index that can contain super_klass into r_array_index.
4622 if (bit != 0) {
4623 population_count(r_array_index, r_array_index, temp2, temp3);
4624 } else {
4625 movl(r_array_index, 1);
4626 }
4627 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4628
4629 // We will consult the secondary-super array.
4630 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4631
4632 // We're asserting that the first word in an Array<Klass*> is the
4633 // length, and the second word is the first word of the data. If
4634 // that ever changes, r_array_base will have to be adjusted here.
4635 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4636 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4637
4638 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4639 jccb(Assembler::equal, L_success);
4640
4641 // Is there another entry to check? Consult the bitmap.
4642 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
4643 jccb(Assembler::carryClear, L_failure);
4644
4645 // Linear probe. Rotate the bitmap so that the next bit to test is
4646 // in Bit 1.
4647 if (bit != 0) {
4648 rorq(r_bitmap, bit);
4649 }
4650
4651 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4652 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4653 // Kills: r_array_length.
4654 // Returns: result.
4655 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
4656 // Result (0/1) is in rdi
4657 jmpb(L_fallthrough);
4658
4659 bind(L_failure);
4660 incq(result); // 0 => 1
4661
4662 bind(L_success);
4663 // result = 0;
4664
4665 bind(L_fallthrough);
4666 BLOCK_COMMENT("} lookup_secondary_supers_table");
4667
4668 if (VerifySecondarySupers) {
4669 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4670 temp1, temp2, temp3);
4671 }
4672 }
4673
4674 // At runtime, return 0 in result if r_super_klass is a superclass of
4675 // r_sub_klass, otherwise return nonzero. Use this version of
4676 // lookup_secondary_supers_table() if you don't know ahead of time
4677 // which superclass will be searched for. Used by interpreter and
4678 // runtime stubs. It is larger and has somewhat greater latency than
4679 // the version above, which takes a constant super_klass_slot.
4680 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
4681 Register r_super_klass,
4682 Register temp1,
4683 Register temp2,
4684 Register temp3,
4685 Register temp4,
4686 Register result) {
4687 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4688 assert_different_registers(r_sub_klass, r_super_klass, rcx);
4689 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
4690
4691 Label L_fallthrough, L_success, L_failure;
4692
4693 BLOCK_COMMENT("lookup_secondary_supers_table {");
4694
4695 RegSetIterator<Register> available_regs = (temps - rcx).begin();
4696
4697 // FIXME. Once we are sure that all paths reaching this point really
4698 // do pass rcx as one of our temps we can get rid of the following
4699 // workaround.
4700 assert(temps.contains(rcx), "fix this code");
4701
4702 // We prefer to have our shift count in rcx. If rcx is one of our
4703 // temps, use it for slot. If not, pick any of our temps.
4704 Register slot;
4705 if (!temps.contains(rcx)) {
4706 slot = *available_regs++;
4707 } else {
4708 slot = rcx;
4709 }
4710
4711 const Register r_array_index = *available_regs++;
4712 const Register r_bitmap = *available_regs++;
4713
4714 // The logic above guarantees this property, but we state it here.
4715 assert_different_registers(r_array_index, r_bitmap, rcx);
4716
4717 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4718 movq(r_array_index, r_bitmap);
4719
4720 // First check the bitmap to see if super_klass might be present. If
4721 // the bit is zero, we are certain that super_klass is not one of
4722 // the secondary supers.
4723 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4724 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
4725 salq(r_array_index, slot);
4726
4727 testq(r_array_index, r_array_index);
4728 // We test the MSB of r_array_index, i.e. its sign bit
4729 jcc(Assembler::positive, L_failure);
4730
4731 const Register r_array_base = *available_regs++;
4732
4733 // Get the first array index that can contain super_klass into r_array_index.
4734 // Note: Clobbers r_array_base and slot.
4735 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
4736
// NB! r_array_index is off by 1. This is compensated for by keeping r_array_base off by 1 word.
4738
4739 // We will consult the secondary-super array.
4740 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4741
4742 // We're asserting that the first word in an Array<Klass*> is the
4743 // length, and the second word is the first word of the data. If
4744 // that ever changes, r_array_base will have to be adjusted here.
4745 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4746 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4747
4748 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4749 jccb(Assembler::equal, L_success);
4750
4751 // Restore slot to its true value
4752 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4753
4754 // Linear probe. Rotate the bitmap so that the next bit to test is
4755 // in Bit 1.
4756 rorq(r_bitmap, slot);
4757
4758 // Is there another entry to check? Consult the bitmap.
4759 btq(r_bitmap, 1);
4760 jccb(Assembler::carryClear, L_failure);
4761
4762 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4763 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4764 // Kills: r_array_length.
4765 // Returns: result.
4766 lookup_secondary_supers_table_slow_path(r_super_klass,
4767 r_array_base,
4768 r_array_index,
4769 r_bitmap,
4770 /*temp1*/result,
4771 /*temp2*/slot,
4772 &L_success,
4773 nullptr);
4774
4775 bind(L_failure);
4776 movq(result, 1);
4777 jmpb(L_fallthrough);
4778
4779 bind(L_success);
4780 xorq(result, result); // = 0
4781
4782 bind(L_fallthrough);
4783 BLOCK_COMMENT("} lookup_secondary_supers_table");
4784
4785 if (VerifySecondarySupers) {
4786 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4787 temp1, temp2, temp3);
4788 }
4789 }
4790
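// Scan the words at addr[count*8] .. addr[(limit-1)*8] for 'value',
// branching to L_success on the first match and to L_failure (or the
// local fallthrough) when the scan is exhausted. Note that, unlike the
// repne/scasq instruction it is named after, this is an explicit loop.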
4791 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
4792 Label* L_success, Label* L_failure) {
4793 Label L_loop, L_fallthrough;
4794 {
4795 int label_nulls = 0;
4796 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4797 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4798 assert(label_nulls <= 1, "at most one null in the batch");
4799 }
4800 bind(L_loop);
4801 cmpq(value, Address(addr, count, Address::times_8));
4802 jcc(Assembler::equal, *L_success);
4803 addl(count, 1);
4804 cmpl(count, limit);
4805 jcc(Assembler::less, L_loop);
4806
4807 if (&L_fallthrough != L_failure) {
4808 jmp(*L_failure);
4809 }
4810 bind(L_fallthrough);
4811 }
4812
4813 // Called by code generated by check_klass_subtype_slow_path
4814 // above. This is called when there is a collision in the hashed
4815 // lookup in the secondary supers array.
4816 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
4817 Register r_array_base,
4818 Register r_array_index,
4819 Register r_bitmap,
4820 Register temp1,
4821 Register temp2,
4822 Label* L_success,
4823 Label* L_failure) {
4824 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
4825
4826 const Register
4827 r_array_length = temp1,
4828 r_sub_klass = noreg,
4829 result = noreg;
4830
4831 Label L_fallthrough;
4832 int label_nulls = 0;
4833 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4834 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4835 assert(label_nulls <= 1, "at most one null in the batch");
4836
4837 // Load the array length.
4838 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4839 // And adjust the array base to point to the data.
4840 // NB! Effectively increments current slot index by 1.
4841 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
4842 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4843
4844 // Linear probe
4845 Label L_huge;
4846
4847 // The bitmap is full to bursting.
4848 // Implicit invariant: BITMAP_FULL implies (length > 0)
4849 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
4850 jcc(Assembler::greater, L_huge);
4851
4852 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
4853 // current slot (at secondary_supers[r_array_index]) has not yet
4854 // been inspected, and r_array_index may be out of bounds if we
4855 // wrapped around the end of the array.
4856
4857 { // This is conventional linear probing, but instead of terminating
4858 // when a null entry is found in the table, we maintain a bitmap
4859 // in which a 0 indicates missing entries.
4860 // The check above guarantees there are 0s in the bitmap, so the loop
4861 // eventually terminates.
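//
// A sketch of the loop below (comments only, no extra code is emitted):
//
//   for (;;) {
//     if (r_array_index >= r_array_length) r_array_index = 0;        // wraparound
//     if (secondary_supers[r_array_index] == r_super_klass) goto L_success;
//     if (bitmap bit 2 is clear) goto L_failure;                     // look-ahead check
//     bitmap = rotate_right(bitmap, 1); r_array_index++;
//   }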
4862
4863 xorl(temp2, temp2); // = 0;
4864
4865 Label L_again;
4866 bind(L_again);
4867
4868 // Check for array wraparound.
4869 cmpl(r_array_index, r_array_length);
4870 cmovl(Assembler::greaterEqual, r_array_index, temp2);
4871
4872 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4873 jcc(Assembler::equal, *L_success);
4874
4875 // If the next bit in bitmap is zero, we're done.
4876 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
4877 jcc(Assembler::carryClear, *L_failure);
4878
4879 rorq(r_bitmap, 1); // Bits 1/2 => 0/1
4880 addl(r_array_index, 1);
4881
4882 jmp(L_again);
4883 }
4884
4885 { // Degenerate case: more than 64 secondary supers.
4886 // FIXME: We could do something smarter here, maybe a vectorized
4887 // comparison or a binary search, but is that worth any added
4888 // complexity?
4889 bind(L_huge);
4890 xorl(r_array_index, r_array_index); // = 0
4891 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
4892 L_success,
4893 (&L_fallthrough != L_failure ? L_failure : nullptr));
4894
4895 bind(L_fallthrough);
4896 }
4897 }
4898
4899 struct VerifyHelperArguments {
4900 Klass* _super;
4901 Klass* _sub;
4902 intptr_t _linear_result;
4903 intptr_t _table_result;
4904 };
4905
4906 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
4907 Klass::on_secondary_supers_verification_failure(args->_super,
4908 args->_sub,
4909 args->_linear_result,
4910 args->_table_result,
4911 msg);
4912 }
4913
4914 // Make sure that the hashed lookup and a linear scan agree.
4915 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
4916 Register r_super_klass,
4917 Register result,
4918 Register temp1,
4919 Register temp2,
4920 Register temp3) {
4921 const Register
4922 r_array_index = temp1,
4923 r_array_length = temp2,
4924 r_array_base = temp3,
4925 r_bitmap = noreg;
4926
4927 BLOCK_COMMENT("verify_secondary_supers_table {");
4928
4929 Label L_success, L_failure, L_check, L_done;
4930
4931 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4932 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4933 // And adjust the array base to point to the data.
4934 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4935
4936 testl(r_array_length, r_array_length); // array_length == 0?
4937 jcc(Assembler::zero, L_failure);
4938
4939 movl(r_array_index, 0);
4940 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
4941 // fall through to L_failure
4942
4943 const Register linear_result = r_array_index; // reuse temp1
4944
4945 bind(L_failure); // not present
4946 movl(linear_result, 1);
4947 jmp(L_check);
4948
4949 bind(L_success); // present
4950 movl(linear_result, 0);
4951
4952 bind(L_check);
4953 cmpl(linear_result, result);
4954 jcc(Assembler::equal, L_done);
4955
4956 { // To avoid calling convention issues, build a record on the stack
4957 // and pass the pointer to that instead.
4958 push(result);
4959 push(linear_result);
4960 push(r_sub_klass);
4961 push(r_super_klass);
4962 movptr(c_rarg1, rsp);
4963 movptr(c_rarg0, (uintptr_t) "mismatch");
4964 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
4965 should_not_reach_here();
4966 }
4967 bind(L_done);
4968
4969 BLOCK_COMMENT("} verify_secondary_supers_table");
4970 }
4971
4972 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
4973
4974 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
4975 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
4976
4977 Label L_fallthrough;
4978 if (L_fast_path == nullptr) {
4979 L_fast_path = &L_fallthrough;
4980 } else if (L_slow_path == nullptr) {
4981 L_slow_path = &L_fallthrough;
4982 }
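// From here on, at most one of L_fast_path / L_slow_path is the local
// fallthrough label; the branch polarity at the end is chosen so that the
// caller-supplied label is the jump target and the fallthrough label is
// bound immediately after.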
4983
4984 // Fast path check: class is fully initialized.
4985 // init_state needs acquire, but x86 is TSO, and so we are already good.
4986 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4987 jcc(Assembler::equal, *L_fast_path);
4988
4989 // Fast path check: current thread is initializer thread
4990 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
4991 if (L_slow_path == &L_fallthrough) {
4992 jcc(Assembler::equal, *L_fast_path);
4993 bind(*L_slow_path);
4994 } else if (L_fast_path == &L_fallthrough) {
4995 jcc(Assembler::notEqual, *L_slow_path);
4996 bind(*L_fast_path);
4997 } else {
4998 Unimplemented();
4999 }
5000 }
5001
5002 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
5003 if (VM_Version::supports_cmov()) {
5004 cmovl(cc, dst, src);
5005 } else {
5006 Label L;
5007 jccb(negate_condition(cc), L);
5008 movl(dst, src);
5009 bind(L);
5010 }
5011 }
5012
5013 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5014 if (VM_Version::supports_cmov()) {
5015 cmovl(cc, dst, src);
5016 } else {
5017 Label L;
5018 jccb(negate_condition(cc), L);
5019 movl(dst, src);
5020 bind(L);
5021 }
5022 }
5023
5024 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
5025 if (!VerifyOops || VerifyAdapterSharing) {
// The address of the code string below confuses VerifyAdapterSharing,
// because it may differ between otherwise equivalent adapters.
5028 return;
5029 }
5030
5031 BLOCK_COMMENT("verify_oop {");
5032 push(rscratch1);
5033 push(rax); // save rax
5034 push(reg); // pass register argument
5035
// Pass a diagnostic message (including the register name) to the verify_oop subroutine
5037 const char* b = nullptr;
5038 {
5039 ResourceMark rm;
5040 stringStream ss;
5041 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
5042 b = code_string(ss.as_string());
5043 }
5044 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5045 pushptr(buffer.addr(), rscratch1);
5046
5047 // call indirectly to solve generation ordering problem
5048 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5049 call(rax);
5050 // Caller pops the arguments (oop, message) and restores rax, r10
5051 BLOCK_COMMENT("} verify_oop");
5052 }
5053
5054 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
5055 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
// Only pcmpeq has dependency-breaking treatment (i.e. execution can begin
// without waiting for the previous result on dst); vpcmpeqd does not, so just use vpternlog
5058 vpternlogd(dst, 0xFF, dst, dst, vector_len);
5059 } else if (VM_Version::supports_avx()) {
5060 vpcmpeqd(dst, dst, dst, vector_len);
5061 } else {
5062 pcmpeqd(dst, dst);
5063 }
5064 }
5065
5066 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
5067 int extra_slot_offset) {
5068 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
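// In effect (a sketch): for a constant arg_slot n, this returns
//   Address(rsp, expr_offset_in_bytes(extra_slot_offset) + n*stackElementSize + wordSize)
// where the trailing wordSize skips the return PC on the stack.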
5069 int stackElementSize = Interpreter::stackElementSize;
5070 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5071 #ifdef ASSERT
5072 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5073 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5074 #endif
5075 Register scale_reg = noreg;
5076 Address::ScaleFactor scale_factor = Address::no_scale;
5077 if (arg_slot.is_constant()) {
5078 offset += arg_slot.as_constant() * stackElementSize;
5079 } else {
5080 scale_reg = arg_slot.as_register();
5081 scale_factor = Address::times(stackElementSize);
5082 }
5083 offset += wordSize; // return PC is on stack
5084 return Address(rsp, scale_reg, scale_factor, offset);
5085 }
5086
5087 // Handle the receiver type profile update given the "recv" klass.
5088 //
5089 // Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
5090 // If there are no matching or claimable receiver entries in RD, updates
5091 // the polymorphic counter.
5092 //
// This code is expected to run in either the interpreter or JIT-ed code, without
// extra synchronization. For safety, receiver cells are claimed atomically, which
// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
// counter updates are not atomic.
5097 //
5098 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
5099 int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
5100 int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
5101 int poly_count_offset = in_bytes(CounterData::count_offset());
5102 int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
5103 int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
5104
// Adjust for MDP offsets. Slots are pointer-sized, and so is the global offset.
5106 assert(is_aligned(mdp_offset, BytesPerWord), "sanity");
5107 base_receiver_offset += mdp_offset;
5108 end_receiver_offset += mdp_offset;
5109 poly_count_offset += mdp_offset;
5110
5111 // Scale down to optimize encoding. Slots are pointer-sized.
5112 assert(is_aligned(base_receiver_offset, BytesPerWord), "sanity");
5113 assert(is_aligned(end_receiver_offset, BytesPerWord), "sanity");
5114 assert(is_aligned(poly_count_offset, BytesPerWord), "sanity");
5115 assert(is_aligned(receiver_step, BytesPerWord), "sanity");
5116 assert(is_aligned(receiver_to_count_step, BytesPerWord), "sanity");
5117 base_receiver_offset >>= LogBytesPerWord;
5118 end_receiver_offset >>= LogBytesPerWord;
5119 poly_count_offset >>= LogBytesPerWord;
5120 receiver_step >>= LogBytesPerWord;
5121 receiver_to_count_step >>= LogBytesPerWord;
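// E.g. (hypothetical values): a byte offset of 24 becomes 3 after the
// shift, and Address(mdp, offset, Address::times_ptr) scales it back up,
// so the same slot is addressed with a shorter immediate encoding.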
5122
5123 #ifdef ASSERT
5124 // We are about to walk the MDO slots without asking for offsets.
5125 // Check that our math hits all the right spots.
5126 for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
5127 int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
5128 int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
5129 int offset = base_receiver_offset + receiver_step*c;
5130 int count_offset = offset + receiver_to_count_step;
5131 assert((offset << LogBytesPerWord) == real_recv_offset, "receiver slot math");
5132 assert((count_offset << LogBytesPerWord) == real_count_offset, "receiver count math");
5133 }
5134 int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
5135 assert(poly_count_offset << LogBytesPerWord == real_poly_count_offset, "poly counter math");
5136 #endif
5137
5138 // Corner case: no profile table. Increment poly counter and exit.
5139 if (ReceiverTypeData::row_limit() == 0) {
5140 addptr(Address(mdp, poly_count_offset, Address::times_ptr), DataLayout::counter_increment);
5141 return;
5142 }
5143
5144 Register offset = rscratch1;
5145
5146 Label L_loop_search_receiver, L_loop_search_empty;
5147 Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
5148
5149 // The code here recognizes three major cases:
5150 // A. Fastest: receiver found in the table
5151 // B. Fast: no receiver in the table, and the table is full
5152 // C. Slow: no receiver in the table, free slots in the table
5153 //
// Case A performance is most important, as perfectly-behaved code would end up
// there, especially with larger TypeProfileWidth. Case B performance is
// important as well: this is where the bulk of calls would land for normally
// megamorphic cases. Case C performance is not essential; its job is to deal with
// installation races, so we optimize for code density instead. Case C needs to make
// sure that receiver rows are claimed only once. This guarantees we never overwrite
// a row for another receiver and never duplicate receivers in the list, keeping the
// profile type-accurate.
//
// It is very tempting to handle these cases in a single loop, and claim the first free
// slot without checking the rest of the table. But profiling code should tolerate free
// slots in the table, as class unloading can clear them. After such cleanup, the
// receiver we need might be _after_ the free slot. Therefore, we need to let at least
// one full scan complete before trying to install into new slots. Splitting the code
// into several tight loops also helpfully optimizes for cases A and B.
5168 //
5169 // This code is effectively:
5170 //
5171 // restart:
5172 // // Fastest: receiver is already installed
5173 // for (i = 0; i < receiver_count(); i++) {
5174 // if (receiver(i) == recv) goto found_recv(i);
5175 // }
5176 //
5177 // // Fast: no receiver, but profile is full
5178 // for (i = 0; i < receiver_count(); i++) {
5179 // if (receiver(i) == null) goto found_null(i);
5180 // }
5181 // goto polymorphic
5182 //
5183 // // Slow: try to install receiver
5184 // found_null(i):
5185 // CAS(&receiver(i), null, recv);
5186 // goto restart
5187 //
5188 // polymorphic:
5189 // count++;
5190 // return
5191 //
5192 // found_recv(i):
5193 // *receiver_count(i)++
5194 //
5195
5196 bind(L_restart);
5197
5198 // Fastest: receiver is already installed
5199 movptr(offset, base_receiver_offset);
5200 bind(L_loop_search_receiver);
5201 cmpptr(recv, Address(mdp, offset, Address::times_ptr));
5202 jccb(Assembler::equal, L_found_recv);
5203 addptr(offset, receiver_step);
5204 cmpptr(offset, end_receiver_offset);
5205 jccb(Assembler::notEqual, L_loop_search_receiver);
5206
5207 // Fast: no receiver, but profile is full
5208 movptr(offset, base_receiver_offset);
5209 bind(L_loop_search_empty);
5210 cmpptr(Address(mdp, offset, Address::times_ptr), NULL_WORD);
5211 jccb(Assembler::equal, L_found_empty);
5212 addptr(offset, receiver_step);
5213 cmpptr(offset, end_receiver_offset);
5214 jccb(Assembler::notEqual, L_loop_search_empty);
5215 jmpb(L_polymorphic);
5216
5217 // Slow: try to install receiver
5218 bind(L_found_empty);
5219
5220 // Atomically swing receiver slot: null -> recv.
5221 //
// The update code uses CAS, which needs the RAX register specifically, *and* it needs
// the other important registers untouched, as they form the address. Therefore, we need
// to shift any important value out of RAX into some other spare register. Whichever
// register we use as the spare, we must first save it on the stack here.
5226
5227 Register spare_reg = noreg;
5228 Register shifted_mdp = mdp;
5229 Register shifted_recv = recv;
5230 if (recv == rax || mdp == rax) {
5231 spare_reg = (recv != rbx && mdp != rbx) ? rbx :
5232 (recv != rcx && mdp != rcx) ? rcx :
5233 rdx;
5234 assert_different_registers(mdp, recv, offset, spare_reg);
5235
5236 push(spare_reg);
5237 if (recv == rax) {
5238 movptr(spare_reg, recv);
5239 shifted_recv = spare_reg;
5240 } else {
5241 assert(mdp == rax, "Remaining case");
5242 movptr(spare_reg, mdp);
5243 shifted_mdp = spare_reg;
5244 }
5245 } else {
5246 push(rax);
5247 }
5248
// None of the important values is in RAX after this shuffle.
5250 assert_different_registers(rax, shifted_mdp, shifted_recv, offset);
5251
5252 xorptr(rax, rax);
5253 cmpxchgptr(shifted_recv, Address(shifted_mdp, offset, Address::times_ptr));
5254
5255 // Unshift registers.
5256 if (recv == rax || mdp == rax) {
5257 movptr(rax, spare_reg);
5258 pop(spare_reg);
5259 } else {
5260 pop(rax);
5261 }
5262
5263 // CAS success means the slot now has the receiver we want. CAS failure means
5264 // something had claimed the slot concurrently: it can be the same receiver we want,
5265 // or something else. Since this is a slow path, we can optimize for code density,
5266 // and just restart the search from the beginning.
5267 jmpb(L_restart);
5268
5269 // Counter updates:
5270
5271 // Increment polymorphic counter instead of receiver slot.
5272 bind(L_polymorphic);
5273 movptr(offset, poly_count_offset);
5274 jmpb(L_count_update);
5275
5276 // Found a receiver, convert its slot offset to corresponding count offset.
5277 bind(L_found_recv);
5278 addptr(offset, receiver_to_count_step);
5279
5280 bind(L_count_update);
5281 addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
5282 }
5283
5284 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5285 if (!VerifyOops || VerifyAdapterSharing) {
// The address of the code string below confuses VerifyAdapterSharing,
// because it may differ between otherwise equivalent adapters.
5288 return;
5289 }
5290
5291 push(rscratch1);
5292 push(rax); // save rax,
5293 // addr may contain rsp so we will have to adjust it based on the push
5294 // we just did (and on 64 bit we do two pushes)
5295 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
5296 // stores rax into addr which is backwards of what was intended.
5297 if (addr.uses(rsp)) {
5298 lea(rax, addr);
5299 pushptr(Address(rax, 2 * BytesPerWord));
5300 } else {
5301 pushptr(addr);
5302 }
5303
// Pass a diagnostic message to the verify_oop subroutine
5305 const char* b = nullptr;
5306 {
5307 ResourceMark rm;
5308 stringStream ss;
5309 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
5310 b = code_string(ss.as_string());
5311 }
5312 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5313 pushptr(buffer.addr(), rscratch1);
5314
5315 // call indirectly to solve generation ordering problem
5316 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5317 call(rax);
5318 // Caller pops the arguments (addr, message) and restores rax, r10.
5319 }
5320
5321 void MacroAssembler::verify_tlab() {
5322 #ifdef ASSERT
5323 if (UseTLAB && VerifyOops) {
5324 Label next, ok;
5325 Register t1 = rsi;
5326
5327 push(t1);
5328
5329 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5330 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
5331 jcc(Assembler::aboveEqual, next);
5332 STOP("assert(top >= start)");
5333 should_not_reach_here();
5334
5335 bind(next);
5336 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
5337 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5338 jcc(Assembler::aboveEqual, ok);
5339 STOP("assert(top <= end)");
5340 should_not_reach_here();
5341
5342 bind(ok);
5343 pop(t1);
5344 }
5345 #endif
5346 }
5347
5348 class ControlWord {
5349 public:
5350 int32_t _value;
5351
5352 int rounding_control() const { return (_value >> 10) & 3 ; }
5353 int precision_control() const { return (_value >> 8) & 3 ; }
5354 bool precision() const { return ((_value >> 5) & 1) != 0; }
5355 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5356 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5357 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5358 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5359 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5360
5361 void print() const {
5362 // rounding control
5363 const char* rc;
5364 switch (rounding_control()) {
5365 case 0: rc = "round near"; break;
5366 case 1: rc = "round down"; break;
5367 case 2: rc = "round up "; break;
5368 case 3: rc = "chop "; break;
5369 default:
5370 rc = nullptr; // silence compiler warnings
5371 fatal("Unknown rounding control: %d", rounding_control());
5372 };
5373 // precision control
5374 const char* pc;
5375 switch (precision_control()) {
5376 case 0: pc = "24 bits "; break;
5377 case 1: pc = "reserved"; break;
5378 case 2: pc = "53 bits "; break;
5379 case 3: pc = "64 bits "; break;
5380 default:
5381 pc = nullptr; // silence compiler warnings
5382 fatal("Unknown precision control: %d", precision_control());
5383 };
5384 // flags
5385 char f[9];
5386 f[0] = ' ';
5387 f[1] = ' ';
5388 f[2] = (precision ()) ? 'P' : 'p';
5389 f[3] = (underflow ()) ? 'U' : 'u';
5390 f[4] = (overflow ()) ? 'O' : 'o';
5391 f[5] = (zero_divide ()) ? 'Z' : 'z';
5392 f[6] = (denormalized()) ? 'D' : 'd';
5393 f[7] = (invalid ()) ? 'I' : 'i';
5394 f[8] = '\x0';
5395 // output
5396 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5397 }
5398
5399 };
5400
5401 class StatusWord {
5402 public:
5403 int32_t _value;
5404
5405 bool busy() const { return ((_value >> 15) & 1) != 0; }
5406 bool C3() const { return ((_value >> 14) & 1) != 0; }
5407 bool C2() const { return ((_value >> 10) & 1) != 0; }
5408 bool C1() const { return ((_value >> 9) & 1) != 0; }
5409 bool C0() const { return ((_value >> 8) & 1) != 0; }
5410 int top() const { return (_value >> 11) & 7 ; }
5411 bool error_status() const { return ((_value >> 7) & 1) != 0; }
5412 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
5413 bool precision() const { return ((_value >> 5) & 1) != 0; }
5414 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5415 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5416 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5417 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5418 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5419
5420 void print() const {
5421 // condition codes
5422 char c[5];
5423 c[0] = (C3()) ? '3' : '-';
5424 c[1] = (C2()) ? '2' : '-';
5425 c[2] = (C1()) ? '1' : '-';
5426 c[3] = (C0()) ? '0' : '-';
5427 c[4] = '\x0';
5428 // flags
5429 char f[9];
5430 f[0] = (error_status()) ? 'E' : '-';
5431 f[1] = (stack_fault ()) ? 'S' : '-';
5432 f[2] = (precision ()) ? 'P' : '-';
5433 f[3] = (underflow ()) ? 'U' : '-';
5434 f[4] = (overflow ()) ? 'O' : '-';
5435 f[5] = (zero_divide ()) ? 'Z' : '-';
5436 f[6] = (denormalized()) ? 'D' : '-';
5437 f[7] = (invalid ()) ? 'I' : '-';
5438 f[8] = '\x0';
5439 // output
5440 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
5441 }
5442
5443 };
5444
5445 class TagWord {
5446 public:
5447 int32_t _value;
5448
5449 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
5450
5451 void print() const {
5452 printf("%04x", _value & 0xFFFF);
5453 }
5454
5455 };
5456
5457 class FPU_Register {
5458 public:
5459 int32_t _m0;
5460 int32_t _m1;
5461 int16_t _ex;
5462
5463 bool is_indefinite() const {
5464 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5465 }
5466
5467 void print() const {
5468 char sign = (_ex < 0) ? '-' : '+';
5469 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
5470 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
5471 };
5472
5473 };
5474
5475 class FPU_State {
5476 public:
5477 enum {
5478 register_size = 10,
5479 number_of_registers = 8,
5480 register_mask = 7
5481 };
5482
5483 ControlWord _control_word;
5484 StatusWord _status_word;
5485 TagWord _tag_word;
5486 int32_t _error_offset;
5487 int32_t _error_selector;
5488 int32_t _data_offset;
5489 int32_t _data_selector;
5490 int8_t _register[register_size * number_of_registers];
5491
5492 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5493 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
5494
5495 const char* tag_as_string(int tag) const {
5496 switch (tag) {
5497 case 0: return "valid";
5498 case 1: return "zero";
5499 case 2: return "special";
5500 case 3: return "empty";
5501 }
5502 ShouldNotReachHere();
5503 return nullptr;
5504 }
5505
5506 void print() const {
5507 // print computation registers
5508 { int t = _status_word.top();
5509 for (int i = 0; i < number_of_registers; i++) {
5510 int j = (i - t) & register_mask;
5511 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5512 st(j)->print();
5513 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5514 }
5515 }
5516 printf("\n");
5517 // print control registers
5518 printf("ctrl = "); _control_word.print(); printf("\n");
5519 printf("stat = "); _status_word .print(); printf("\n");
5520 printf("tags = "); _tag_word .print(); printf("\n");
5521 }
5522
5523 };
5524
5525 class Flag_Register {
5526 public:
5527 int32_t _value;
5528
5529 bool overflow() const { return ((_value >> 11) & 1) != 0; }
5530 bool direction() const { return ((_value >> 10) & 1) != 0; }
5531 bool sign() const { return ((_value >> 7) & 1) != 0; }
5532 bool zero() const { return ((_value >> 6) & 1) != 0; }
5533 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
5534 bool parity() const { return ((_value >> 2) & 1) != 0; }
5535 bool carry() const { return ((_value >> 0) & 1) != 0; }
5536
5537 void print() const {
5538 // flags
5539 char f[8];
5540 f[0] = (overflow ()) ? 'O' : '-';
5541 f[1] = (direction ()) ? 'D' : '-';
5542 f[2] = (sign ()) ? 'S' : '-';
5543 f[3] = (zero ()) ? 'Z' : '-';
5544 f[4] = (auxiliary_carry()) ? 'A' : '-';
5545 f[5] = (parity ()) ? 'P' : '-';
5546 f[6] = (carry ()) ? 'C' : '-';
5547 f[7] = '\x0';
5548 // output
5549 printf("%08x flags = %s", _value, f);
5550 }
5551
5552 };
5553
5554 class IU_Register {
5555 public:
5556 int32_t _value;
5557
5558 void print() const {
5559 printf("%08x %11d", _value, _value);
5560 }
5561
5562 };
5563
5564 class IU_State {
5565 public:
5566 Flag_Register _eflags;
5567 IU_Register _rdi;
5568 IU_Register _rsi;
5569 IU_Register _rbp;
5570 IU_Register _rsp;
5571 IU_Register _rbx;
5572 IU_Register _rdx;
5573 IU_Register _rcx;
5574 IU_Register _rax;
5575
5576 void print() const {
5577 // computation registers
5578 printf("rax, = "); _rax.print(); printf("\n");
5579 printf("rbx, = "); _rbx.print(); printf("\n");
5580 printf("rcx = "); _rcx.print(); printf("\n");
5581 printf("rdx = "); _rdx.print(); printf("\n");
5582 printf("rdi = "); _rdi.print(); printf("\n");
5583 printf("rsi = "); _rsi.print(); printf("\n");
5584 printf("rbp, = "); _rbp.print(); printf("\n");
5585 printf("rsp = "); _rsp.print(); printf("\n");
5586 printf("\n");
5587 // control registers
5588 printf("flgs = "); _eflags.print(); printf("\n");
5589 }
5590 };
5591
5592
5593 class CPU_State {
5594 public:
5595 FPU_State _fpu_state;
5596 IU_State _iu_state;
5597
5598 void print() const {
5599 printf("--------------------------------------------------\n");
5600 _iu_state .print();
5601 printf("\n");
5602 _fpu_state.print();
5603 printf("--------------------------------------------------\n");
5604 }
5605
5606 };
5607
5608
5609 static void _print_CPU_state(CPU_State* state) {
5610 state->print();
5611 };
5612
5613
5614 void MacroAssembler::print_CPU_state() {
5615 push_CPU_state();
5616 push(rsp); // pass CPU state
5617 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5618 addptr(rsp, wordSize); // discard argument
5619 pop_CPU_state();
5620 }
5621
5622 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
5623 // Either restore the MXCSR register after returning from the JNI Call
5624 // or verify that it wasn't changed (with -Xcheck:jni flag).
5625 if (VM_Version::supports_sse()) {
5626 if (RestoreMXCSROnJNICalls) {
5627 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
5628 } else if (CheckJNICalls) {
5629 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5630 }
5631 }
5632 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5633 vzeroupper();
5634 }
5635
5636 // ((OopHandle)result).resolve();
5637 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
5638 assert_different_registers(result, tmp);
5639
5640 // Only 64 bit platforms support GCs that require a tmp register
5641 // Only IN_HEAP loads require a thread_tmp register
5642 // OopHandle::resolve is an indirection like jobject.
5643 access_load_at(T_OBJECT, IN_NATIVE,
5644 result, Address(result, 0), tmp);
5645 }
5646
5647 // ((WeakHandle)result).resolve();
5648 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
5649 assert_different_registers(rresult, rtmp);
5650 Label resolved;
5651
5652 // A null weak handle resolves to null.
5653 cmpptr(rresult, 0);
5654 jcc(Assembler::equal, resolved);
5655
5656 // Only 64 bit platforms support GCs that require a tmp register
5657 // Only IN_HEAP loads require a thread_tmp register
5658 // WeakHandle::resolve is an indirection like jweak.
5659 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5660 rresult, Address(rresult, 0), rtmp);
5661 bind(resolved);
5662 }
5663
5664 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5665 // get mirror
5666 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5667 load_method_holder(mirror, method);
5668 movptr(mirror, Address(mirror, mirror_offset));
5669 resolve_oop_handle(mirror, tmp);
5670 }
5671
5672 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5673 load_method_holder(rresult, rmethod);
5674 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5675 }
5676
5677 void MacroAssembler::load_method_holder(Register holder, Register method) {
5678 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
5679 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5680 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5681 }
5682
5683 void MacroAssembler::load_metadata(Register dst, Register src) {
5684 if (UseCompactObjectHeaders) {
5685 load_narrow_klass_compact(dst, src);
5686 } else {
5687 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5688 }
5689 }
5690
5691 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5692 assert(UseCompactObjectHeaders, "expect compact object headers");
5693 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5694 shrq(dst, markWord::klass_shift);
5695 }
5696
5697 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5698 assert_different_registers(src, tmp);
5699 assert_different_registers(dst, tmp);
5700
5701 if (UseCompactObjectHeaders) {
5702 load_narrow_klass_compact(dst, src);
5703 decode_klass_not_null(dst, tmp);
5704 } else {
5705 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5706 decode_klass_not_null(dst, tmp);
5707 }
5708 }
5709
5710 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
5711 load_klass(dst, src, tmp);
5712 movptr(dst, Address(dst, Klass::prototype_header_offset()));
5713 }
5714
5715 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5716 assert(!UseCompactObjectHeaders, "not with compact headers");
5717 assert_different_registers(src, tmp);
5718 assert_different_registers(dst, tmp);
5719 encode_klass_not_null(src, tmp);
5720 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5721 }
5722
5723 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
5724 if (UseCompactObjectHeaders) {
5725 assert(tmp != noreg, "need tmp");
5726 assert_different_registers(klass, obj, tmp);
5727 load_narrow_klass_compact(tmp, obj);
5728 cmpl(klass, tmp);
5729 } else {
5730 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5731 }
5732 }
5733
5734 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5735 if (UseCompactObjectHeaders) {
5736 assert(tmp2 != noreg, "need tmp2");
5737 assert_different_registers(obj1, obj2, tmp1, tmp2);
5738 load_narrow_klass_compact(tmp1, obj1);
5739 load_narrow_klass_compact(tmp2, obj2);
5740 cmpl(tmp1, tmp2);
5741 } else {
5742 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5743 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
5744 }
5745 }
5746
5747 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5748 Register tmp1) {
5749 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5750 decorators = AccessInternal::decorator_fixup(decorators, type);
5751 bool as_raw = (decorators & AS_RAW) != 0;
5752 if (as_raw) {
5753 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
5754 } else {
5755 bs->load_at(this, decorators, type, dst, src, tmp1);
5756 }
5757 }
5758
5759 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5760 Register tmp1, Register tmp2, Register tmp3) {
5761 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5762 decorators = AccessInternal::decorator_fixup(decorators, type);
5763 bool as_raw = (decorators & AS_RAW) != 0;
5764 if (as_raw) {
5765 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5766 } else {
5767 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5768 }
5769 }
5770
5771 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5772 Register inline_layout_info) {
5773 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5774 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5775 }
5776
5777 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5778 movptr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
5779 movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
5780 }
5781
5782 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
5783 // ((address) (void*) o) + vk->payload_offset();
5784 Register offset = (data == oop) ? rscratch1 : data;
5785 payload_offset(inline_klass, offset);
5786 if (data == oop) {
5787 addptr(data, offset);
5788 } else {
5789 lea(data, Address(oop, offset));
5790 }
5791 }
5792
5793 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5794 Register index, Register data) {
5795 assert(index != rcx, "index needs to shift by rcx");
5796 assert_different_registers(array, array_klass, index);
5797 assert_different_registers(rcx, array, index);
5798
5799 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5800 movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
5801
5802 // Klass::layout_helper_log2_element_size(lh)
5803 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5804 shrl(rcx, Klass::_lh_log2_element_size_shift);
5805 andl(rcx, Klass::_lh_log2_element_size_mask);
5806 shlptr(index); // index << rcx
5807
5808 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
5809 }
5810
5811 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5812 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
5813 }
5814
5815 // Doesn't do verification, generates fixed size code
5816 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5817 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
5818 }
5819
5820 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5821 Register tmp2, Register tmp3, DecoratorSet decorators) {
5822 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5823 }
5824
5825 // Used for storing nulls.
5826 void MacroAssembler::store_heap_oop_null(Address dst) {
5827 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5828 }
5829
5830 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5831 assert(!UseCompactObjectHeaders, "Don't use with compact headers");
5832 // Store to klass gap in destination
5833 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5834 }
5835
5836 #ifdef ASSERT
5837 void MacroAssembler::verify_heapbase(const char* msg) {
5838 assert (UseCompressedOops, "should be compressed");
5839 assert (Universe::heap() != nullptr, "java heap should be initialized");
5840 if (CheckCompressedOops) {
5841 Label ok;
5842 ExternalAddress src2(CompressedOops::base_addr());
5843 const bool is_src2_reachable = reachable(src2);
5844 if (!is_src2_reachable) {
5845 push(rscratch1); // cmpptr trashes rscratch1
5846 }
5847 cmpptr(r12_heapbase, src2, rscratch1);
5848 jcc(Assembler::equal, ok);
5849 STOP(msg);
5850 bind(ok);
5851 if (!is_src2_reachable) {
5852 pop(rscratch1);
5853 }
5854 }
5855 }
5856 #endif
5857
5858 // Algorithm must match oop.inline.hpp encode_heap_oop.
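// In effect (a sketch): narrow = (r == nullptr) ? 0 : (r - base) >> shift.
// The cmov below first maps a null oop to the heap base, so the subtraction
// yields zero for null.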
5859 void MacroAssembler::encode_heap_oop(Register r) {
5860 #ifdef ASSERT
5861 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5862 #endif
5863 verify_oop_msg(r, "broken oop in encode_heap_oop");
5864 if (CompressedOops::base() == nullptr) {
5865 if (CompressedOops::shift() != 0) {
5866 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5867 shrq(r, LogMinObjAlignmentInBytes);
5868 }
5869 return;
5870 }
5871 testq(r, r);
5872 cmovq(Assembler::equal, r, r12_heapbase);
5873 subq(r, r12_heapbase);
5874 shrq(r, LogMinObjAlignmentInBytes);
5875 }
5876
5877 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5878 #ifdef ASSERT
5879 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5880 if (CheckCompressedOops) {
5881 Label ok;
5882 testq(r, r);
5883 jcc(Assembler::notEqual, ok);
5884 STOP("null oop passed to encode_heap_oop_not_null");
5885 bind(ok);
5886 }
5887 #endif
5888 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5889 if (CompressedOops::base() != nullptr) {
5890 subq(r, r12_heapbase);
5891 }
5892 if (CompressedOops::shift() != 0) {
5893 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5894 shrq(r, LogMinObjAlignmentInBytes);
5895 }
5896 }
5897
5898 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5899 #ifdef ASSERT
5900 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5901 if (CheckCompressedOops) {
5902 Label ok;
5903 testq(src, src);
5904 jcc(Assembler::notEqual, ok);
5905 STOP("null oop passed to encode_heap_oop_not_null2");
5906 bind(ok);
5907 }
5908 #endif
5909 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5910 if (dst != src) {
5911 movq(dst, src);
5912 }
5913 if (CompressedOops::base() != nullptr) {
5914 subq(dst, r12_heapbase);
5915 }
5916 if (CompressedOops::shift() != 0) {
5917 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5918 shrq(dst, LogMinObjAlignmentInBytes);
5919 }
5920 }
5921
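// In effect (a sketch): r = (narrow == 0) ? nullptr : base + (narrow << shift).
// Note that shlq sets ZF when the compressed oop was null, which lets the
// non-null-base path skip the base addition via the jccb below.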
5922 void MacroAssembler::decode_heap_oop(Register r) {
5923 #ifdef ASSERT
5924 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5925 #endif
5926 if (CompressedOops::base() == nullptr) {
5927 if (CompressedOops::shift() != 0) {
5928 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5929 shlq(r, LogMinObjAlignmentInBytes);
5930 }
5931 } else {
5932 Label done;
5933 shlq(r, LogMinObjAlignmentInBytes);
5934 jccb(Assembler::equal, done);
5935 addq(r, r12_heapbase);
5936 bind(done);
5937 }
5938 verify_oop_msg(r, "broken oop in decode_heap_oop");
5939 }
5940
5941 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5942 // Note: it will change flags
5943 assert (UseCompressedOops, "should only be used for compressed headers");
5944 assert (Universe::heap() != nullptr, "java heap should be initialized");
5945 // Cannot assert, unverified entry point counts instructions (see .ad file)
5946 // vtableStubs also counts instructions in pd_code_size_limit.
5947 // Also do not verify_oop as this is called by verify_oop.
5948 if (CompressedOops::shift() != 0) {
5949 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5950 shlq(r, LogMinObjAlignmentInBytes);
5951 if (CompressedOops::base() != nullptr) {
5952 addq(r, r12_heapbase);
5953 }
5954 } else {
5955 assert (CompressedOops::base() == nullptr, "sanity");
5956 }
5957 }
5958
5959 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5960 // Note: it will change flags
5961 assert (UseCompressedOops, "should only be used for compressed headers");
5962 assert (Universe::heap() != nullptr, "java heap should be initialized");
5963 // Cannot assert, unverified entry point counts instructions (see .ad file)
5964 // vtableStubs also counts instructions in pd_code_size_limit.
5965 // Also do not verify_oop as this is called by verify_oop.
5966 if (CompressedOops::shift() != 0) {
5967 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5968 if (LogMinObjAlignmentInBytes == Address::times_8) {
5969 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5970 } else {
5971 if (dst != src) {
5972 movq(dst, src);
5973 }
5974 shlq(dst, LogMinObjAlignmentInBytes);
5975 if (CompressedOops::base() != nullptr) {
5976 addq(dst, r12_heapbase);
5977 }
5978 }
5979 } else {
5980 assert (CompressedOops::base() == nullptr, "sanity");
5981 if (dst != src) {
5982 movq(dst, src);
5983 }
5984 }
5985 }
5986
5987 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5988 BLOCK_COMMENT("encode_klass_not_null {");
5989 assert_different_registers(r, tmp);
5990 if (CompressedKlassPointers::base() != nullptr) {
5991 if (AOTCodeCache::is_on_for_dump()) {
5992 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5993 } else {
5994 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5995 }
5996 subq(r, tmp);
5997 }
5998 if (CompressedKlassPointers::shift() != 0) {
5999 shrq(r, CompressedKlassPointers::shift());
6000 }
6001 BLOCK_COMMENT("} encode_klass_not_null");
6002 }
6003
6004 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
6005 BLOCK_COMMENT("encode_and_move_klass_not_null {");
6006 assert_different_registers(src, dst);
6007 if (CompressedKlassPointers::base() != nullptr) {
6008 if (AOTCodeCache::is_on_for_dump()) {
6009 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
6010 negq(dst);
6011 } else {
6012 movptr(dst, -(intptr_t)CompressedKlassPointers::base());
6013 }
6014 addq(dst, src);
6015 } else {
6016 movptr(dst, src);
6017 }
6018 if (CompressedKlassPointers::shift() != 0) {
6019 shrq(dst, CompressedKlassPointers::shift());
6020 }
6021 BLOCK_COMMENT("} encode_and_move_klass_not_null");
6022 }
6023
6024 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
6025 BLOCK_COMMENT("decode_klass_not_null {");
6026 assert_different_registers(r, tmp);
6027 // Note: it will change flags
6028 // Cannot assert, unverified entry point counts instructions (see .ad file)
6029 // vtableStubs also counts instructions in pd_code_size_limit.
6030 // Also do not verify_oop as this is called by verify_oop.
6031 if (CompressedKlassPointers::shift() != 0) {
6032 shlq(r, CompressedKlassPointers::shift());
6033 }
6034 if (CompressedKlassPointers::base() != nullptr) {
6035 if (AOTCodeCache::is_on_for_dump()) {
6036 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
6037 } else {
6038 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
6039 }
6040 addq(r, tmp);
6041 }
6042 BLOCK_COMMENT("} decode_klass_not_null");
6043 }
6044
6045 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
6046 BLOCK_COMMENT("decode_and_move_klass_not_null {");
6047 assert_different_registers(src, dst);
6048 // Note: it will change flags
6049 // Cannot assert, unverified entry point counts instructions (see .ad file)
6050 // vtableStubs also counts instructions in pd_code_size_limit.
6051 // Also do not verify_oop as this is called by verify_oop.
6052
6053 if (CompressedKlassPointers::base() == nullptr &&
6054 CompressedKlassPointers::shift() == 0) {
// The best-case scenario is that there is no base or shift. Then it is already
// a pointer that needs nothing but a register rename.
6057 movl(dst, src);
6058 } else {
6059 if (CompressedKlassPointers::shift() <= Address::times_8) {
6060 if (CompressedKlassPointers::base() != nullptr) {
6061 if (AOTCodeCache::is_on_for_dump()) {
6062 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
6063 } else {
6064 movptr(dst, (intptr_t)CompressedKlassPointers::base());
6065 }
6066 } else {
6067 xorq(dst, dst);
6068 }
6069 if (CompressedKlassPointers::shift() != 0) {
6070 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
6071 leaq(dst, Address(dst, src, Address::times_8, 0));
6072 } else {
6073 addq(dst, src);
6074 }
6075 } else {
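// The shift is too large for lea's scale factor. Instead compute
//   ((base >> shift) + src) << shift == base + (src << shift),
// which holds as long as the base is aligned to the shift (an alignment
// the compressed-klass encoding is assumed here to guarantee).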
6076 if (CompressedKlassPointers::base() != nullptr) {
6077 if (AOTCodeCache::is_on_for_dump()) {
6078 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
6079 shrq(dst, CompressedKlassPointers::shift());
6080 } else {
6081 const intptr_t base_right_shifted =
6082 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
6083 movptr(dst, base_right_shifted);
6084 }
6085 } else {
6086 xorq(dst, dst);
6087 }
6088 addq(dst, src);
6089 shlq(dst, CompressedKlassPointers::shift());
6090 }
6091 }
6092 BLOCK_COMMENT("} decode_and_move_klass_not_null");
6093 }
6094
6095 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
6096 assert (UseCompressedOops, "should only be used for compressed headers");
6097 assert (Universe::heap() != nullptr, "java heap should be initialized");
6098 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6099 int oop_index = oop_recorder()->find_index(obj);
6100 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6101 mov_narrow_oop(dst, oop_index, rspec);
6102 }
6103
6104 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
6105 assert (UseCompressedOops, "should only be used for compressed headers");
6106 assert (Universe::heap() != nullptr, "java heap should be initialized");
6107 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6108 int oop_index = oop_recorder()->find_index(obj);
6109 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6110 mov_narrow_oop(dst, oop_index, rspec);
6111 }
6112
6113 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
6114 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6115 int klass_index = oop_recorder()->find_index(k);
6116 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6117 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6118 }
6119
6120 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
6121 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6122 int klass_index = oop_recorder()->find_index(k);
6123 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6124 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6125 }
6126
6127 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6128 assert (UseCompressedOops, "should only be used for compressed headers");
6129 assert (Universe::heap() != nullptr, "java heap should be initialized");
6130 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6131 int oop_index = oop_recorder()->find_index(obj);
6132 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6133 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6134 }
6135
6136 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6137 assert (UseCompressedOops, "should only be used for compressed headers");
6138 assert (Universe::heap() != nullptr, "java heap should be initialized");
6139 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6140 int oop_index = oop_recorder()->find_index(obj);
6141 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6142 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6143 }
6144
6145 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6146 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6147 int klass_index = oop_recorder()->find_index(k);
6148 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6149 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6150 }
6151
6152 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6153 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6154 int klass_index = oop_recorder()->find_index(k);
6155 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6156 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6157 }
6158
6159 void MacroAssembler::reinit_heapbase() {
6160 if (UseCompressedOops) {
6161 if (Universe::heap() != nullptr && !AOTCodeCache::is_on_for_dump()) {
6162 if (CompressedOops::base() == nullptr) {
6163 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6164 } else {
6165 mov64(r12_heapbase, (int64_t)CompressedOops::base());
6166 }
6167 } else {
6168 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
6169 }
6170 }
6171 }
6172
6173 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
assert(InlineTypeReturnedAsFields, "inline types should only be returned as fields when the flag is enabled");
6175 // An inline type might be returned. If fields are in registers we
6176 // need to allocate an inline type instance and initialize it with
6177 // the value of the fields.
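// Return-convention sketch (as used below): rax either holds a buffered
// inline-type oop (low bit clear), or (InlineKlass* | 1) when the fields
// are still in registers and must be packed into a fresh buffer.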
6178 Label skip;
// We only need a new buffered inline type if a buffered one was not already returned
6180 testptr(rax, 1);
6181 jcc(Assembler::zero, skip);
6182 int call_offset = -1;
6183
6184 #ifdef _LP64
// The following code is similar to allocate_instance but has some slight differences,
// e.g. the object size is never zero and is sometimes a constant; storing the klass ptr after
// allocating is not necessary if vk != nullptr; etc. allocate_instance is not aware of these.
6188 Label slow_case;
6189 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6190 mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
6191 if (vk != nullptr) {
6192 // Called from C1, where the return type is statically known.
6193 movptr(rbx, (intptr_t)vk->get_InlineKlass());
6194 jint lh = vk->layout_helper();
6195 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6196 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
6197 tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
6198 } else {
6199 jmp(slow_case);
6200 }
6201 } else {
6202 // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
6203 mov(rbx, rax);
6204 andptr(rbx, -2);
6205 if (UseTLAB) {
6206 movl(r14, Address(rbx, Klass::layout_helper_offset()));
6207 testl(r14, Klass::_lh_instance_slow_path_bit);
6208 jcc(Assembler::notZero, slow_case);
6209 tlab_allocate(rax, r14, 0, r13, r14, slow_case);
6210 } else {
6211 jmp(slow_case);
6212 }
6213 }
6214 if (UseTLAB) {
6215 // 2. Initialize buffered inline instance header
6216 Register buffer_obj = rax;
6217 Register klass = rbx;
6218 if (UseCompactObjectHeaders) {
6219 Register mark_word = r13;
6220 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
6221 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
6222 } else {
6223 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
6224 xorl(r13, r13);
6225 store_klass_gap(buffer_obj, r13);
6226 if (vk == nullptr) {
6227 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
6228 mov(r13, klass);
6229 }
6230 store_klass(buffer_obj, klass, rscratch1);
6231 klass = r13;
6232 }
6233 // 3. Initialize its fields with an inline class specific handler
6234 if (vk != nullptr) {
6235 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6236 } else {
6237 movptr(rbx, Address(klass, InlineKlass::adr_members_offset()));
6238 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
6239 call(rbx);
6240 }
6241 jmp(skip);
6242 }
6243 bind(slow_case);
6244 // We failed to allocate a new inline type, fall back to a runtime
6245 // call. Some oop field may be live in some registers but we can't
6246 // tell. That runtime call will take care of preserving them
6247 // across a GC if there's one.
6248 mov(rax, rscratch1);
6249 #endif
6250
6251 if (from_interpreter) {
6252 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
6253 } else {
6254 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
6255 call_offset = offset();
6256 }
6257
6258 bind(skip);
6259 return call_offset;
6260 }
6261
6262 // Move a value between registers/stack slots and update the reg_state
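// State-machine example (sketch): a move to a reg_readonly destination is
// refused (returns false); once the destination is reg_writable the move is
// emitted, the source becomes reg_writable (its value has been consumed) and
// the destination becomes reg_written (it must not be overwritten again).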
6263 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
6264 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
6265 if (reg_state[to->value()] == reg_written) {
6266 return true; // Already written
6267 }
6268 if (from != to && bt != T_VOID) {
6269 if (reg_state[to->value()] == reg_readonly) {
6270 return false; // Not yet writable
6271 }
6272 if (from->is_reg()) {
6273 if (to->is_reg()) {
6274 if (from->is_XMMRegister()) {
6275 if (bt == T_DOUBLE) {
6276 movdbl(to->as_XMMRegister(), from->as_XMMRegister());
6277 } else {
6278 assert(bt == T_FLOAT, "must be float");
6279 movflt(to->as_XMMRegister(), from->as_XMMRegister());
6280 }
6281 } else {
6282 movq(to->as_Register(), from->as_Register());
6283 }
6284 } else {
6285 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6286 Address to_addr = Address(rsp, st_off);
6287 if (from->is_XMMRegister()) {
6288 if (bt == T_DOUBLE) {
6289 movdbl(to_addr, from->as_XMMRegister());
6290 } else {
6291 assert(bt == T_FLOAT, "must be float");
6292 movflt(to_addr, from->as_XMMRegister());
6293 }
6294 } else {
6295 movq(to_addr, from->as_Register());
6296 }
6297 }
6298 } else {
6299 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
6300 if (to->is_reg()) {
6301 if (to->is_XMMRegister()) {
6302 if (bt == T_DOUBLE) {
6303 movdbl(to->as_XMMRegister(), from_addr);
6304 } else {
6305 assert(bt == T_FLOAT, "must be float");
6306 movflt(to->as_XMMRegister(), from_addr);
6307 }
6308 } else {
6309 movq(to->as_Register(), from_addr);
6310 }
6311 } else {
6312 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6313 movq(r13, from_addr);
6314 movq(Address(rsp, st_off), r13);
6315 }
6316 }
6317 }
6318 // Update register states
6319 reg_state[from->value()] = reg_writable;
6320 reg_state[to->value()] = reg_written;
6321 return true;
6322 }
6323
6324 // Calculate the extra stack space required for packing or unpacking inline
6325 // args and adjust the stack pointer (see MacroAssembler::remove_frame).
6326 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
6327 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
6328 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
6329 assert(sp_inc > 0, "sanity");
// Two additional slots to account for the return address
6331 sp_inc += 2 * VMRegImpl::stack_slot_size;
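// Worked example (sketch, assuming 16-byte StackAlignmentInBytes and 4-byte
// stack slots): args_on_stack = 3 gives sp_inc = align_up(12, 16) = 16, plus
// 2 slots (8 bytes) for the return address = 24; the method then returns
// 24 + wordSize = 32, the extra word accounting for the pushed rbp.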
6332
6333 push(rbp);
6334 subptr(rsp, sp_inc);
6335 #ifdef ASSERT
6336 movl(Address(rsp, 0), badRegWordVal);
6337 movl(Address(rsp, VMRegImpl::stack_slot_size), badRegWordVal);
6338 #endif
6339 return sp_inc + wordSize; // account for rbp space
6340 }
6341
6342 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
6343 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
6344 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
6345 RegState reg_state[]) {
6346 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6347 assert(from->is_valid(), "source must be valid");
6348 bool progress = false;
6349 #ifdef ASSERT
6350 const int start_offset = offset();
6351 #endif
6352
6353 Label L_null, L_notNull;
6354 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
6355 Register tmp1 = r10;
6356 Register tmp2 = r13;
6357 Register fromReg = noreg;
6358 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, true);
6359 bool done = true;
6360 bool mark_done = true;
6361 VMReg toReg;
6362 BasicType bt;
6363 // Check if argument requires a null check
6364 bool null_check = false;
6365 VMReg nullCheckReg;
6366 while (stream.next(nullCheckReg, bt)) {
6367 if (sig->at(stream.sig_index())._offset == -1) {
6368 null_check = true;
6369 break;
6370 }
6371 }
6372 stream.reset(sig_index, to_index);
6373 while (stream.next(toReg, bt)) {
6374 assert(toReg->is_valid(), "destination must be valid");
6375 int idx = (int)toReg->value();
6376 if (reg_state[idx] == reg_readonly) {
6377 if (idx != from->value()) {
6378 mark_done = false;
6379 }
6380 done = false;
6381 continue;
6382 } else if (reg_state[idx] == reg_written) {
6383 continue;
6384 }
6385 assert(reg_state[idx] == reg_writable, "must be writable");
6386 reg_state[idx] = reg_written;
6387 progress = true;
6388
6389 if (fromReg == noreg) {
6390 if (from->is_reg()) {
6391 fromReg = from->as_Register();
6392 } else {
6393 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6394 movq(tmp1, Address(rsp, st_off));
6395 fromReg = tmp1;
6396 }
6397 if (null_check) {
6398 // Nullable inline type argument, emit null check
6399 testptr(fromReg, fromReg);
6400 jcc(Assembler::zero, L_null);
6401 }
6402 }
6403 int off = sig->at(stream.sig_index())._offset;
6404 if (off == -1) {
6405 assert(null_check, "Missing null check at");
6406 if (toReg->is_stack()) {
6407 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6408 movq(Address(rsp, st_off), 1);
6409 } else {
6410 movq(toReg->as_Register(), 1);
6411 }
6412 continue;
6413 }
6414 if (sig->at(stream.sig_index())._vt_oop) {
6415 if (toReg->is_stack()) {
6416 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6417 movq(Address(rsp, st_off), fromReg);
6418 } else {
6419 movq(toReg->as_Register(), fromReg);
6420 }
6421 continue;
6422 }
6423 assert(off > 0, "offset in object should be positive");
6424 Address fromAddr = Address(fromReg, off);
6425 if (!toReg->is_XMMRegister()) {
6426 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
6427 if (is_reference_type(bt)) {
6428 load_heap_oop(dst, fromAddr);
6429 } else {
6430 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
6431 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
6432 }
6433 if (toReg->is_stack()) {
6434 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6435 movq(Address(rsp, st_off), dst);
6436 }
6437 } else if (bt == T_DOUBLE) {
6438 movdbl(toReg->as_XMMRegister(), fromAddr);
6439 } else {
6440 assert(bt == T_FLOAT, "must be float");
6441 movflt(toReg->as_XMMRegister(), fromAddr);
6442 }
6443 }
6444 if (progress && null_check) {
6445 if (done) {
6446 jmp(L_notNull);
6447 bind(L_null);
6448 // Set null marker to zero to signal that the argument is null.
6449 // Also set all fields to zero since the runtime requires a canonical
6450 // representation of a flat null.
6451 stream.reset(sig_index, to_index);
6452 while (stream.next(toReg, bt)) {
6453 if (toReg->is_stack()) {
6454 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6455 movq(Address(rsp, st_off), 0);
6456 } else if (toReg->is_XMMRegister()) {
6457 xorps(toReg->as_XMMRegister(), toReg->as_XMMRegister());
6458 } else {
6459 xorl(toReg->as_Register(), toReg->as_Register());
6460 }
6461 }
6462 bind(L_notNull);
6463 } else {
6464 bind(L_null);
6465 }
6466 }
6467
6468 sig_index = stream.sig_index();
6469 to_index = stream.regs_index();
6470
6471 if (mark_done && reg_state[from->value()] != reg_written) {
6472 // This is okay because no one else will write to that slot
6473 reg_state[from->value()] = reg_writable;
6474 }
6475 from_index--;
6476 assert(progress || (start_offset == offset()), "should not emit code");
6477 return done;
6478 }
6479
6480 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
6481 VMRegPair* from, int from_count, int& from_index, VMReg to,
6482 RegState reg_state[], Register val_array) {
6483 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
6484 assert(to->is_valid(), "destination must be valid");
6485
6486 if (reg_state[to->value()] == reg_written) {
6487 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6488 return true; // Already written
6489 }
6490
6491 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
6492 Register val_obj_tmp = r11;
6493 Register from_reg_tmp = r14;
6494 Register tmp1 = r10;
6495 Register tmp2 = r13;
6496 Register tmp3 = rbx;
6497 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
6498
6499 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
6500
6501 if (reg_state[to->value()] == reg_readonly) {
6502 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
6503 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6504 return false; // Not yet writable
6505 }
6506 val_obj = val_obj_tmp;
6507 }
6508
6509 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
6510 VMReg fromReg;
6511 BasicType bt;
6512 Label L_null;
6513 while (stream.next(fromReg, bt)) {
6514 assert(fromReg->is_valid(), "source must be valid");
6515 reg_state[fromReg->value()] = reg_writable;
6516
6517 int off = sig->at(stream.sig_index())._offset;
6518 if (off == -1) {
6519 // Nullable inline type argument, emit null check
6520 Label L_notNull;
6521 if (fromReg->is_stack()) {
6522 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6523 testb(Address(rsp, ld_off), 1);
6524 } else {
6525 testb(fromReg->as_Register(), 1);
6526 }
6527 jcc(Assembler::notZero, L_notNull);
6528 movptr(val_obj, 0);
6529 jmp(L_null);
6530 bind(L_notNull);
6531 continue;
6532 }
6533 if (sig->at(stream.sig_index())._vt_oop) {
6534 // buffer argument: use if non null
6535 if (fromReg->is_stack()) {
6536 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6537 movptr(val_obj, Address(rsp, ld_off));
6538 } else {
6539 movptr(val_obj, fromReg->as_Register());
6540 }
6541 testptr(val_obj, val_obj);
6542 jcc(Assembler::notEqual, L_null);
6543 // otherwise get the buffer from the just allocated pool of buffers
6544 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
6545 load_heap_oop(val_obj, Address(val_array, index));
6546 continue;
6547 }
6548
6549 assert(off > 0, "offset in object should be positive");
6550 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
6551
6552 // Pack the scalarized field into the value object.
6553 Address dst(val_obj, off);
6554 if (!fromReg->is_XMMRegister()) {
6555 Register src;
6556 if (fromReg->is_stack()) {
6557 src = from_reg_tmp;
6558 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6559 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
6560 } else {
6561 src = fromReg->as_Register();
6562 }
6563 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
6564 if (is_reference_type(bt)) {
6565 // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep val_obj valid.
6566 mov(tmp3, val_obj);
6567 Address dst_with_tmp3(tmp3, off);
6568 store_heap_oop(dst_with_tmp3, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
6569 } else {
6570 store_sized_value(dst, src, size_in_bytes);
6571 }
6572 } else if (bt == T_DOUBLE) {
6573 movdbl(dst, fromReg->as_XMMRegister());
6574 } else {
6575 assert(bt == T_FLOAT, "must be float");
6576 movflt(dst, fromReg->as_XMMRegister());
6577 }
6578 }
6579 bind(L_null);
6580 sig_index = stream.sig_index();
6581 from_index = stream.regs_index();
6582
6583 assert(reg_state[to->value()] == reg_writable, "must have already been read");
6584 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
assert(success, "to register must be writable");
6586 return true;
6587 }
6588
6589 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6590 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
6591 }
6592
6593 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6594 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6595 if (needs_stack_repair) {
6596 // The method has a scalarized entry point (where fields of value object arguments
6597 // are passed through registers and stack), and a non-scalarized entry point (where
6598 // value object arguments are given as oops). The non-scalarized entry point will
6599 // first load each field of value object arguments and store them in registers and on
6600 // the stack in a way compatible with the scalarized entry point. To do so, some extra
6601 // stack space might be reserved (if argument registers are not enough). On leaving the
6602 // method, this space must be freed.
6603 //
6604 // In case we used the non-scalarized entry point the stack looks like this:
6605 //
6606 // | Arguments from caller |
6607 // |---------------------------| <-- caller's SP
6608 // | Return address #1 |
6609 // | Saved RBP #1 |
6610 // |---------------------------|
6611 // | Extension space for |
6612 // | inline arg (un)packing |
6613 // |---------------------------| <-- start of this method's frame
6614 // | Return address #2 |
6615 // | Saved RBP #2 |
6616 // |---------------------------| <-- RBP (with -XX:+PreserveFramePointer)
6617 // | sp_inc |
6618 // | method locals |
6619 // |---------------------------| <-- SP
6620 //
// Space for the return pc and saved rbp is reserved twice. Only the #1 copies
// contain the real values of the return pc and saved rbp; the #2 copies are not
// reliable and must not be used. They mainly exist to leave the same gap between
// the extension space and the locals as there would be between the real arguments
// and the locals if no unpacking were needed (i.e. from the scalarized entry point).
6626 //
6627 // When leaving, one must load RBP #1 into RBP, and use the copy #1 of the return address,
6628 // while keeping in mind that from the scalarized entry point, there will be only one
6629 // copy. Indeed, in the case we used the scalarized calling convention, the stack looks like this:
6630 //
6631 // | Arguments from caller |
6632 // |---------------------------| <-- caller's SP
6633 // | Return address |
6634 // | Saved RBP |
6635 // |---------------------------| <-- FP (with -XX:+PreserveFramePointer)
6636 // | sp_inc |
6637 // | method locals |
6638 // |---------------------------| <-- SP
6639 //
6640 // The sp_inc stack slot holds the total size of the frame, including the extension
6641 // space and copies #2 of the return address and the saved RBP (but never the copies
6642 // #1 of the return address and saved RBP). That is how to find the copies #1 of the
// return address and saved rbp. This size is expressed in bytes. Be careful when
// using it from C++: for pointer arithmetic you may need to divide it by wordSize.
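//
// Epilogue sketch of the two instructions below:
//   rsp += *(rsp + initial_framesize - wordSize);  // pop frame + extension (sp_inc)
//   rbp  = pop();                                  // restore saved RBP #1
// which leaves return address #1 on top of the stack for the ret.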
6645
6646 // The stack increment resides just below the saved rbp
6647 addq(rsp, Address(rsp, initial_framesize - wordSize));
6648 pop(rbp);
6649 } else {
6650 if (initial_framesize > 0) {
6651 addq(rsp, initial_framesize);
6652 }
6653 pop(rbp);
6654 }
6655 }
6656
6657 #if COMPILER2_OR_JVMCI
6658
6659 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
6660 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
6661 // cnt - number of qwords (8-byte words).
6662 // base - start address, qword aligned.
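// Overall structure (sketch):
//   while (cnt >= 8) { store 64 bytes of val; base += 64; cnt -= 8; }
//   then one 32-byte store if at least 4 qwords remain, and finally single
//   qword stores (or a masked vector fill when AVX-512VL is available).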
6663 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
6664 bool use64byteVector = (MaxVectorSize == 64) && (CopyAVX3Threshold == 0);
6665 if (use64byteVector) {
6666 evpbroadcastq(xtmp, val, AVX_512bit);
6667 } else if (MaxVectorSize >= 32) {
6668 movdq(xtmp, val);
6669 punpcklqdq(xtmp, xtmp);
6670 vinserti128_high(xtmp, xtmp);
6671 } else {
6672 movdq(xtmp, val);
6673 punpcklqdq(xtmp, xtmp);
6674 }
6675 jmp(L_zero_64_bytes);
6676
6677 BIND(L_loop);
6678 if (MaxVectorSize >= 32) {
6679 fill64(base, 0, xtmp, use64byteVector);
6680 } else {
6681 movdqu(Address(base, 0), xtmp);
6682 movdqu(Address(base, 16), xtmp);
6683 movdqu(Address(base, 32), xtmp);
6684 movdqu(Address(base, 48), xtmp);
6685 }
6686 addptr(base, 64);
6687
6688 BIND(L_zero_64_bytes);
6689 subptr(cnt, 8);
6690 jccb(Assembler::greaterEqual, L_loop);
6691
6692 // Copy trailing 64 bytes
6693 if (use64byteVector) {
6694 addptr(cnt, 8);
6695 jccb(Assembler::equal, L_end);
6696 fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
6697 jmp(L_end);
6698 } else {
6699 addptr(cnt, 4);
6700 jccb(Assembler::less, L_tail);
6701 if (MaxVectorSize >= 32) {
6702 vmovdqu(Address(base, 0), xtmp);
6703 } else {
6704 movdqu(Address(base, 0), xtmp);
6705 movdqu(Address(base, 16), xtmp);
6706 }
6707 }
6708 addptr(base, 32);
6709 subptr(cnt, 4);
6710
6711 BIND(L_tail);
6712 addptr(cnt, 4);
6713 jccb(Assembler::lessEqual, L_end);
6714 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
6715 fill32_masked(3, base, 0, xtmp, mask, cnt, val);
6716 } else {
6717 decrement(cnt);
6718
6719 BIND(L_sloop);
6720 movq(Address(base, 0), xtmp);
6721 addptr(base, 8);
6722 decrement(cnt);
6723 jccb(Assembler::greaterEqual, L_sloop);
6724 }
6725 BIND(L_end);
6726 }
6727
6728 // Clearing constant sized memory using YMM/ZMM registers.
6729 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "AVX-512VL is required for masked fills");
6731 bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
6732
6733 int vector64_count = (cnt & (~0x7)) >> 3;
6734 cnt = cnt & 0x7;
6735 const int fill64_per_loop = 4;
6736 const int max_unrolled_fill64 = 8;
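// Worked example (sketch): cnt = 13 qwords gives vector64_count = 1 (one
// unrolled 64-byte fill) and a remaining cnt = 5 qwords, which the tail
// switch below clears with a masked 512-bit store, or a 256-bit store plus
// a qword store, depending on use64byteVector.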
6737
6738 // 64 byte initialization loop.
6739 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
6740 int start64 = 0;
6741 if (vector64_count > max_unrolled_fill64) {
6742 Label LOOP;
6743 Register index = rtmp;
6744
6745 start64 = vector64_count - (vector64_count % fill64_per_loop);
6746
6747 movl(index, 0);
6748 BIND(LOOP);
6749 for (int i = 0; i < fill64_per_loop; i++) {
6750 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
6751 }
6752 addl(index, fill64_per_loop * 64);
6753 cmpl(index, start64 * 64);
6754 jccb(Assembler::less, LOOP);
6755 }
6756 for (int i = start64; i < vector64_count; i++) {
6757 fill64(base, i * 64, xtmp, use64byteVector);
6758 }
6759
6760 // Clear remaining 64 byte tail.
6761 int disp = vector64_count * 64;
6762 if (cnt) {
6763 switch (cnt) {
6764 case 1:
6765 movq(Address(base, disp), xtmp);
6766 break;
6767 case 2:
6768 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
6769 break;
6770 case 3:
6771 movl(rtmp, 0x7);
6772 kmovwl(mask, rtmp);
6773 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
6774 break;
6775 case 4:
6776 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6777 break;
6778 case 5:
6779 if (use64byteVector) {
6780 movl(rtmp, 0x1F);
6781 kmovwl(mask, rtmp);
6782 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6783 } else {
6784 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6785 movq(Address(base, disp + 32), xtmp);
6786 }
6787 break;
6788 case 6:
6789 if (use64byteVector) {
6790 movl(rtmp, 0x3F);
6791 kmovwl(mask, rtmp);
6792 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6793 } else {
6794 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6795 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
6796 }
6797 break;
6798 case 7:
6799 if (use64byteVector) {
6800 movl(rtmp, 0x7F);
6801 kmovwl(mask, rtmp);
6802 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6803 } else {
6804 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6805 movl(rtmp, 0x7);
6806 kmovwl(mask, rtmp);
6807 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
6808 }
6809 break;
6810 default:
6811 fatal("Unexpected length : %d\n",cnt);
6812 break;
6813 }
6814 }
6815 }
6816
6817 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
6818 bool is_large, bool word_copy_only, KRegister mask) {
6819 // cnt - number of qwords (8-byte words).
6820 // base - start address, qword aligned.
6821 // is_large - if optimizers know cnt is larger than InitArrayShortSize
assert(base==rdi, "base register must be rdi for rep stos");
assert(val==rax, "val register must be rax for rep stos");
assert(cnt==rcx, "cnt register must be rcx for rep stos");
assert(InitArrayShortSize % BytesPerLong == 0,
"InitArrayShortSize should be a multiple of BytesPerLong");
6827
6828 Label DONE;
6829
6830 if (!is_large) {
6831 Label LOOP, LONG;
6832 cmpptr(cnt, InitArrayShortSize/BytesPerLong);
6833 jccb(Assembler::greater, LONG);
6834
6835 decrement(cnt);
6836 jccb(Assembler::negative, DONE); // Zero length
6837
6838 // Use individual pointer-sized stores for small counts:
6839 BIND(LOOP);
6840 movptr(Address(base, cnt, Address::times_ptr), val);
6841 decrement(cnt);
6842 jccb(Assembler::greaterEqual, LOOP);
6843 jmpb(DONE);
6844
6845 BIND(LONG);
6846 }
6847
6848 // Use longer rep-prefixed ops for non-small counts:
6849 if (UseFastStosb && !word_copy_only) {
6850 shlptr(cnt, 3); // convert to number of bytes
6851 rep_stosb();
6852 } else if (UseXMMForObjInit) {
6853 xmm_clear_mem(base, cnt, val, xtmp, mask);
6854 } else {
6855 rep_stos();
6856 }
6857
6858 BIND(DONE);
6859 }
6860
6861 #endif //COMPILER2_OR_JVMCI
6862
6863
6864 void MacroAssembler::generate_fill(BasicType t, bool aligned,
6865 Register to, Register value, Register count,
6866 Register rtmp, XMMRegister xtmp) {
6867 ShortBranchVerifier sbv(this);
6868 assert_different_registers(to, value, count, rtmp);
6869 Label L_exit;
6870 Label L_fill_2_bytes, L_fill_4_bytes;
6871
6872 #if defined(COMPILER2)
if (MaxVectorSize >= 32 &&
6874 VM_Version::supports_avx512vlbw() &&
6875 VM_Version::supports_bmi2()) {
6876 generate_fill_avx3(t, to, value, count, rtmp, xtmp);
6877 return;
6878 }
6879 #endif
6880
6881 int shift = -1;
6882 switch (t) {
6883 case T_BYTE:
6884 shift = 2;
6885 break;
6886 case T_SHORT:
6887 shift = 1;
6888 break;
6889 case T_INT:
6890 shift = 0;
6891 break;
6892 default: ShouldNotReachHere();
6893 }
6894
6895 if (t == T_BYTE) {
6896 andl(value, 0xff);
6897 movl(rtmp, value);
6898 shll(rtmp, 8);
6899 orl(value, rtmp);
6900 }
6901 if (t == T_SHORT) {
6902 andl(value, 0xffff);
6903 }
6904 if (t == T_BYTE || t == T_SHORT) {
6905 movl(rtmp, value);
6906 shll(rtmp, 16);
6907 orl(value, rtmp);
6908 }
6909
6910 cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
6911 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
6912 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
6913 Label L_skip_align2;
6914 // align source address at 4 bytes address boundary
6915 if (t == T_BYTE) {
6916 Label L_skip_align1;
6917 // One byte misalignment happens only for byte arrays
6918 testptr(to, 1);
6919 jccb(Assembler::zero, L_skip_align1);
6920 movb(Address(to, 0), value);
6921 increment(to);
6922 decrement(count);
6923 BIND(L_skip_align1);
6924 }
6925 // Two bytes misalignment happens only for byte and short (char) arrays
6926 testptr(to, 2);
6927 jccb(Assembler::zero, L_skip_align2);
6928 movw(Address(to, 0), value);
6929 addptr(to, 2);
6930 subptr(count, 1<<(shift-1));
6931 BIND(L_skip_align2);
6932 }
6933 {
6934 Label L_fill_32_bytes;
6935 if (!UseUnalignedLoadStores) {
6936 // align to 8 bytes, we know we are 4 byte aligned to start
6937 testptr(to, 4);
6938 jccb(Assembler::zero, L_fill_32_bytes);
6939 movl(Address(to, 0), value);
6940 addptr(to, 4);
6941 subptr(count, 1<<shift);
6942 }
6943 BIND(L_fill_32_bytes);
6944 {
6945 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
6946 movdl(xtmp, value);
6947 if (UseAVX >= 2 && UseUnalignedLoadStores) {
6948 Label L_check_fill_32_bytes;
6949 if (UseAVX > 2) {
6950 // Fill 64-byte chunks
6951 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
6952
6953 // If number of bytes to fill < CopyAVX3Threshold, perform fill using AVX2
6954 cmpptr(count, CopyAVX3Threshold);
6955 jccb(Assembler::below, L_check_fill_64_bytes_avx2);
6956
6957 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
6958
6959 subptr(count, 16 << shift);
6960 jcc(Assembler::less, L_check_fill_32_bytes);
6961 align(16);
6962
6963 BIND(L_fill_64_bytes_loop_avx3);
6964 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
6965 addptr(to, 64);
6966 subptr(count, 16 << shift);
6967 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
6968 jmpb(L_check_fill_32_bytes);
6969
6970 BIND(L_check_fill_64_bytes_avx2);
6971 }
6972 // Fill 64-byte chunks
6973 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
6974
6975 subptr(count, 16 << shift);
6976 jcc(Assembler::less, L_check_fill_32_bytes);
6977
6978 // align data for 64-byte chunks
6979 Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
6980 if (EnableX86ECoreOpts) {
6981 // align 'big' arrays to cache lines to minimize split_stores
6982 cmpptr(count, 96 << shift);
6983 jcc(Assembler::below, L_fill_64_bytes_loop);
6984
6985 // Find the bytes needed for alignment
6986 movptr(rtmp, to);
6987 andptr(rtmp, 0x1c);
6988 jcc(Assembler::zero, L_fill_64_bytes_loop);
negptr(rtmp); // with the add below, rtmp = 32 - (to & 0x1c): bytes needed to reach 32-byte alignment
6990 addptr(rtmp, 32);
6991 shrptr(rtmp, 2 - shift);// get number of elements from bytes
6992 subptr(count, rtmp); // adjust count by number of elements
6993
6994 align(16);
6995 BIND(L_align_64_bytes_loop);
6996 movdl(Address(to, 0), xtmp);
6997 addptr(to, 4);
6998 subptr(rtmp, 1 << shift);
6999 jcc(Assembler::greater, L_align_64_bytes_loop);
7000 }
7001
7002 align(16);
7003 BIND(L_fill_64_bytes_loop);
7004 vmovdqu(Address(to, 0), xtmp);
7005 vmovdqu(Address(to, 32), xtmp);
7006 addptr(to, 64);
7007 subptr(count, 16 << shift);
7008 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7009
7010 align(16);
7011 BIND(L_check_fill_32_bytes);
7012 addptr(count, 8 << shift);
7013 jccb(Assembler::less, L_check_fill_8_bytes);
7014 vmovdqu(Address(to, 0), xtmp);
7015 addptr(to, 32);
7016 subptr(count, 8 << shift);
7017
7018 BIND(L_check_fill_8_bytes);
7019 // clean upper bits of YMM registers
7020 movdl(xtmp, value);
7021 pshufd(xtmp, xtmp, 0);
7022 } else {
7023 // Fill 32-byte chunks
7024 pshufd(xtmp, xtmp, 0);
7025
7026 subptr(count, 8 << shift);
7027 jcc(Assembler::less, L_check_fill_8_bytes);
7028 align(16);
7029
7030 BIND(L_fill_32_bytes_loop);
7031
7032 if (UseUnalignedLoadStores) {
7033 movdqu(Address(to, 0), xtmp);
7034 movdqu(Address(to, 16), xtmp);
7035 } else {
7036 movq(Address(to, 0), xtmp);
7037 movq(Address(to, 8), xtmp);
7038 movq(Address(to, 16), xtmp);
7039 movq(Address(to, 24), xtmp);
7040 }
7041
7042 addptr(to, 32);
7043 subptr(count, 8 << shift);
7044 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7045
7046 BIND(L_check_fill_8_bytes);
7047 }
7048 addptr(count, 8 << shift);
7049 jccb(Assembler::zero, L_exit);
7050 jmpb(L_fill_8_bytes);
7051
7052 //
7053 // length is too short, just fill qwords
7054 //
7055 align(16);
7056 BIND(L_fill_8_bytes_loop);
7057 movq(Address(to, 0), xtmp);
7058 addptr(to, 8);
7059 BIND(L_fill_8_bytes);
7060 subptr(count, 1 << (shift + 1));
7061 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7062 }
7063 }
7064
7065 Label L_fill_4_bytes_loop;
7066 testl(count, 1 << shift);
7067 jccb(Assembler::zero, L_fill_2_bytes);
7068
7069 align(16);
7070 BIND(L_fill_4_bytes_loop);
7071 movl(Address(to, 0), value);
7072 addptr(to, 4);
7073
7074 BIND(L_fill_4_bytes);
7075 subptr(count, 1 << shift);
7076 jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
7077
7078 if (t == T_BYTE || t == T_SHORT) {
7079 Label L_fill_byte;
7080 BIND(L_fill_2_bytes);
7081 // fill trailing 2 bytes
7082 testl(count, 1<<(shift-1));
7083 jccb(Assembler::zero, L_fill_byte);
7084 movw(Address(to, 0), value);
7085 if (t == T_BYTE) {
7086 addptr(to, 2);
7087 BIND(L_fill_byte);
7088 // fill trailing byte
7089 testl(count, 1);
7090 jccb(Assembler::zero, L_exit);
7091 movb(Address(to, 0), value);
7092 } else {
7093 BIND(L_fill_byte);
7094 }
7095 } else {
7096 BIND(L_fill_2_bytes);
7097 }
7098 BIND(L_exit);
7099 }
7100
7101 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
7102 switch(type) {
7103 case T_BYTE:
7104 case T_BOOLEAN:
7105 evpbroadcastb(dst, src, vector_len);
7106 break;
7107 case T_SHORT:
7108 case T_CHAR:
7109 evpbroadcastw(dst, src, vector_len);
7110 break;
7111 case T_INT:
7112 case T_FLOAT:
7113 evpbroadcastd(dst, src, vector_len);
7114 break;
7115 case T_LONG:
7116 case T_DOUBLE:
7117 evpbroadcastq(dst, src, vector_len);
7118 break;
7119 default:
7120 fatal("Unhandled type : %s", type2name(type));
7121 break;
7122 }
7123 }
7124
7125 // Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
7126 //
7127 // @IntrinsicCandidate
7128 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
7129 // char[] sa, int sp, byte[] da, int dp, int len) {
7130 // int i = 0;
7131 // for (; i < len; i++) {
7132 // char c = sa[sp++];
7133 // if (c > '\u00FF')
7134 // break;
7135 // da[dp++] = (byte) c;
7136 // }
7137 // return i;
7138 // }
7139 //
7140 // @IntrinsicCandidate
7141 // int java.lang.StringCoding.encodeISOArray0(
7142 // byte[] sa, int sp, byte[] da, int dp, int len) {
7143 // int i = 0;
7144 // for (; i < len; i++) {
7145 // char c = StringUTF16.getChar(sa, sp++);
7146 // if (c > '\u00FF')
7147 // break;
7148 // da[dp++] = (byte) c;
7149 // }
7150 // return i;
7151 // }
7152 //
7153 // @IntrinsicCandidate
7154 // int java.lang.StringCoding.encodeAsciiArray0(
7155 // char[] sa, int sp, byte[] da, int dp, int len) {
7156 // int i = 0;
7157 // for (; i < len; i++) {
7158 // char c = sa[sp++];
7159 // if (c >= '\u0080')
7160 // break;
7161 // da[dp++] = (byte) c;
7162 // }
7163 // return i;
7164 // }
7165 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7166 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7167 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7168 Register tmp5, Register result, bool ascii) {
7169
7170 // rsi: src
7171 // rdi: dst
7172 // rdx: len
7173 // rcx: tmp5
7174 // rax: result
7175 ShortBranchVerifier sbv(this);
7176 assert_different_registers(src, dst, len, tmp5, result);
7177 Label L_done, L_copy_1_char, L_copy_1_char_exit;
7178
7179 int mask = ascii ? 0xff80ff80 : 0xff00ff00;
7180 int short_mask = ascii ? 0xff80 : 0xff00;
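// Mask example (sketch): a char is valid Latin-1 iff (c & 0xFF00) == 0 and
// ASCII iff (c & 0xFF80) == 0; the packed 32-bit mask tests two chars per
// scalar check, and its broadcast vector form below tests 8 or 16 chars at
// once via ptest/vptest.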
7181
7182 // set result
7183 xorl(result, result);
7184 // check for zero length
7185 testl(len, len);
7186 jcc(Assembler::zero, L_done);
7187
7188 movl(result, len);
7189
7190 // Setup pointers
7191 lea(src, Address(src, len, Address::times_2)); // char[]
7192 lea(dst, Address(dst, len, Address::times_1)); // byte[]
7193 negptr(len);
7194
7195 if (UseSSE42Intrinsics || UseAVX >= 2) {
7196 Label L_copy_8_chars, L_copy_8_chars_exit;
7197 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7198
7199 if (UseAVX >= 2) {
7200 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7201 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
7202 movdl(tmp1Reg, tmp5);
7203 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
7204 jmp(L_chars_32_check);
7205
7206 bind(L_copy_32_chars);
7207 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7208 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7209 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7210 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
7211 jccb(Assembler::notZero, L_copy_32_chars_exit);
7212 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7213 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
7214 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7215
7216 bind(L_chars_32_check);
7217 addptr(len, 32);
7218 jcc(Assembler::lessEqual, L_copy_32_chars);
7219
7220 bind(L_copy_32_chars_exit);
7221 subptr(len, 16);
7222 jccb(Assembler::greater, L_copy_16_chars_exit);
7223
7224 } else if (UseSSE42Intrinsics) {
7225 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
7226 movdl(tmp1Reg, tmp5);
7227 pshufd(tmp1Reg, tmp1Reg, 0);
7228 jmpb(L_chars_16_check);
7229 }
7230
7231 bind(L_copy_16_chars);
7232 if (UseAVX >= 2) {
7233 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7234 vptest(tmp2Reg, tmp1Reg);
7235 jcc(Assembler::notZero, L_copy_16_chars_exit);
7236 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
7237 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
7238 } else {
7239 if (UseAVX > 0) {
7240 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7241 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7242 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
7243 } else {
7244 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7245 por(tmp2Reg, tmp3Reg);
7246 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7247 por(tmp2Reg, tmp4Reg);
7248 }
7249 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
7250 jccb(Assembler::notZero, L_copy_16_chars_exit);
7251 packuswb(tmp3Reg, tmp4Reg);
7252 }
7253 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7254
7255 bind(L_chars_16_check);
7256 addptr(len, 16);
7257 jcc(Assembler::lessEqual, L_copy_16_chars);
7258
7259 bind(L_copy_16_chars_exit);
7260 if (UseAVX >= 2) {
7261 // clean upper bits of YMM registers
7262 vpxor(tmp2Reg, tmp2Reg);
7263 vpxor(tmp3Reg, tmp3Reg);
7264 vpxor(tmp4Reg, tmp4Reg);
7265 movdl(tmp1Reg, tmp5);
7266 pshufd(tmp1Reg, tmp1Reg, 0);
7267 }
7268 subptr(len, 8);
7269 jccb(Assembler::greater, L_copy_8_chars_exit);
7270
7271 bind(L_copy_8_chars);
7272 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7273 ptest(tmp3Reg, tmp1Reg);
7274 jccb(Assembler::notZero, L_copy_8_chars_exit);
7275 packuswb(tmp3Reg, tmp1Reg);
7276 movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7277 addptr(len, 8);
7278 jccb(Assembler::lessEqual, L_copy_8_chars);
7279
7280 bind(L_copy_8_chars_exit);
7281 subptr(len, 8);
7282 jccb(Assembler::zero, L_done);
7283 }
7284
7285 bind(L_copy_1_char);
7286 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7287 testl(tmp5, short_mask); // check if Unicode or non-ASCII char
7288 jccb(Assembler::notZero, L_copy_1_char_exit);
7289 movb(Address(dst, len, Address::times_1, 0), tmp5);
7290 addptr(len, 1);
7291 jccb(Assembler::less, L_copy_1_char);
7292
7293 bind(L_copy_1_char_exit);
7294 addptr(result, len); // len is negative count of not processed elements
7295
7296 bind(L_done);
7297 }
7298
7299 /**
7300 * Helper for multiply_to_len().
7301 */
7302 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7303 addq(dest_lo, src1);
7304 adcq(dest_hi, 0);
7305 addq(dest_lo, src2);
7306 adcq(dest_hi, 0);
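// Net effect (sketch): (dest_hi:dest_lo) += src1 + src2 as a 128-bit add,
// with each 64-bit carry folded into dest_hi via adcq.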
7307 }
7308
7309 /**
7310 * Multiply 64 bit by 64 bit first loop.
7311 */
7312 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7313 Register y, Register y_idx, Register z,
7314 Register carry, Register product,
7315 Register idx, Register kdx) {
7316 //
7317 // jlong carry, x[], y[], z[];
// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7319 // huge_128 product = y[idx] * x[xstart] + carry;
7320 // z[kdx] = (jlong)product;
7321 // carry = (jlong)(product >>> 64);
7322 // }
7323 // z[xstart] = carry;
7324 //
7325
7326 Label L_first_loop, L_first_loop_exit;
7327 Label L_one_x, L_one_y, L_multiply;
7328
7329 decrementl(xstart);
7330 jcc(Assembler::negative, L_one_x);
7331
7332 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7333 rorq(x_xstart, 32); // convert big-endian to little-endian
7334
7335 bind(L_first_loop);
7336 decrementl(idx);
7337 jcc(Assembler::negative, L_first_loop_exit);
7338 decrementl(idx);
7339 jcc(Assembler::negative, L_one_y);
7340 movq(y_idx, Address(y, idx, Address::times_4, 0));
7341 rorq(y_idx, 32); // convert big-endian to little-endian
7342 bind(L_multiply);
7343 movq(product, x_xstart);
7344 mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7345 addq(product, carry);
7346 adcq(rdx, 0);
7347 subl(kdx, 2);
7348 movl(Address(z, kdx, Address::times_4, 4), product);
7349 shrq(product, 32);
7350 movl(Address(z, kdx, Address::times_4, 0), product);
7351 movq(carry, rdx);
7352 jmp(L_first_loop);
7353
7354 bind(L_one_y);
7355 movl(y_idx, Address(y, 0));
7356 jmp(L_multiply);
7357
7358 bind(L_one_x);
7359 movl(x_xstart, Address(x, 0));
7360 jmp(L_first_loop);
7361
7362 bind(L_first_loop_exit);
7363 }
7364
7365 /**
7366 * Multiply 64 bit by 64 bit and add 128 bit.
7367 */
7368 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7369 Register yz_idx, Register idx,
7370 Register carry, Register product, int offset) {
7371 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7372 // z[kdx] = (jlong)product;
7373
7374 movq(yz_idx, Address(y, idx, Address::times_4, offset));
7375 rorq(yz_idx, 32); // convert big-endian to little-endian
7376 movq(product, x_xstart);
7377 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7378 movq(yz_idx, Address(z, idx, Address::times_4, offset));
7379 rorq(yz_idx, 32); // convert big-endian to little-endian
7380
7381 add2_with_carry(rdx, product, carry, yz_idx);
7382
7383 movl(Address(z, idx, Address::times_4, offset+4), product);
7384 shrq(product, 32);
7385 movl(Address(z, idx, Address::times_4, offset), product);
7386
7387 }
7388
7389 /**
7390 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7391 */
7392 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7393 Register yz_idx, Register idx, Register jdx,
7394 Register carry, Register product,
7395 Register carry2) {
7396 // jlong carry, x[], y[], z[];
7397 // int kdx = ystart+1;
7398 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7399 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7400 // z[kdx+idx+1] = (jlong)product;
7401 // jlong carry2 = (jlong)(product >>> 64);
7402 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7403 // z[kdx+idx] = (jlong)product;
7404 // carry = (jlong)(product >>> 64);
7405 // }
7406 // idx += 2;
7407 // if (idx > 0) {
7408 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7409 // z[kdx+idx] = (jlong)product;
7410 // carry = (jlong)(product >>> 64);
7411 // }
7412 //
7413
7414 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7415
7416 movl(jdx, idx);
7417 andl(jdx, 0xFFFFFFFC);
7418 shrl(jdx, 2);
7419
7420 bind(L_third_loop);
7421 subl(jdx, 1);
7422 jcc(Assembler::negative, L_third_loop_exit);
7423 subl(idx, 4);
7424
7425 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7426 movq(carry2, rdx);
7427
7428 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7429 movq(carry, rdx);
7430 jmp(L_third_loop);
7431
7432 bind (L_third_loop_exit);
7433
7434 andl (idx, 0x3);
7435 jcc(Assembler::zero, L_post_third_loop_done);
7436
7437 Label L_check_1;
7438 subl(idx, 2);
7439 jcc(Assembler::negative, L_check_1);
7440
7441 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7442 movq(carry, rdx);
7443
7444 bind (L_check_1);
7445 addl (idx, 0x2);
7446 andl (idx, 0x1);
7447 subl(idx, 1);
7448 jcc(Assembler::negative, L_post_third_loop_done);
7449
7450 movl(yz_idx, Address(y, idx, Address::times_4, 0));
7451 movq(product, x_xstart);
7452 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7453 movl(yz_idx, Address(z, idx, Address::times_4, 0));
7454
7455 add2_with_carry(rdx, product, yz_idx, carry);
7456
7457 movl(Address(z, idx, Address::times_4, 0), product);
7458 shrq(product, 32);
7459
7460 shlq(rdx, 32);
7461 orq(product, rdx);
7462 movq(carry, product);
7463
7464 bind(L_post_third_loop_done);
7465 }
7466
7467 /**
7468 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7469 *
7470 */
7471 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7472 Register carry, Register carry2,
7473 Register idx, Register jdx,
7474 Register yz_idx1, Register yz_idx2,
7475 Register tmp, Register tmp3, Register tmp4) {
7476 assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7477
7478 // jlong carry, x[], y[], z[];
7479 // int kdx = ystart+1;
7480 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7481 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7482 // jlong carry2 = (jlong)(tmp3 >>> 64);
7483 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
7484 // carry = (jlong)(tmp4 >>> 64);
7485 // z[kdx+idx+1] = (jlong)tmp3;
7486 // z[kdx+idx] = (jlong)tmp4;
7487 // }
7488 // idx += 2;
7489 // if (idx > 0) {
7490 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7491 // z[kdx+idx] = (jlong)yz_idx1;
7492 // carry = (jlong)(yz_idx1 >>> 64);
7493 // }
7494 //
7495
7496 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7497
7498 movl(jdx, idx);
7499 andl(jdx, 0xFFFFFFFC);
7500 shrl(jdx, 2);
7501
7502 bind(L_third_loop);
7503 subl(jdx, 1);
7504 jcc(Assembler::negative, L_third_loop_exit);
7505 subl(idx, 4);
7506
7507 movq(yz_idx1, Address(y, idx, Address::times_4, 8));
7508 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7509 movq(yz_idx2, Address(y, idx, Address::times_4, 0));
7510 rorxq(yz_idx2, yz_idx2, 32);
7511
7512 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7513 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp
7514
7515 movq(yz_idx1, Address(z, idx, Address::times_4, 8));
7516 rorxq(yz_idx1, yz_idx1, 32);
7517 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7518 rorxq(yz_idx2, yz_idx2, 32);
7519
7520 if (VM_Version::supports_adx()) {
7521 adcxq(tmp3, carry);
7522 adoxq(tmp3, yz_idx1);
7523
7524 adcxq(tmp4, tmp);
7525 adoxq(tmp4, yz_idx2);
7526
7527 movl(carry, 0); // does not affect flags
7528 adcxq(carry2, carry);
7529 adoxq(carry2, carry);
7530 } else {
7531 add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7532 add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7533 }
7534 movq(carry, carry2);
7535
7536 movl(Address(z, idx, Address::times_4, 12), tmp3);
7537 shrq(tmp3, 32);
7538 movl(Address(z, idx, Address::times_4, 8), tmp3);
7539
7540 movl(Address(z, idx, Address::times_4, 4), tmp4);
7541 shrq(tmp4, 32);
7542 movl(Address(z, idx, Address::times_4, 0), tmp4);
7543
7544 jmp(L_third_loop);
7545
7546 bind (L_third_loop_exit);
7547
7548 andl (idx, 0x3);
7549 jcc(Assembler::zero, L_post_third_loop_done);
7550
7551 Label L_check_1;
7552 subl(idx, 2);
7553 jcc(Assembler::negative, L_check_1);
7554
7555 movq(yz_idx1, Address(y, idx, Address::times_4, 0));
7556 rorxq(yz_idx1, yz_idx1, 32);
7557 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7558 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7559 rorxq(yz_idx2, yz_idx2, 32);
7560
7561 add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7562
7563 movl(Address(z, idx, Address::times_4, 4), tmp3);
7564 shrq(tmp3, 32);
7565 movl(Address(z, idx, Address::times_4, 0), tmp3);
7566 movq(carry, tmp4);
7567
7568 bind (L_check_1);
7569 addl (idx, 0x2);
7570 andl (idx, 0x1);
7571 subl(idx, 1);
7572 jcc(Assembler::negative, L_post_third_loop_done);
7573 movl(tmp4, Address(y, idx, Address::times_4, 0));
7574 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3
7575 movl(tmp4, Address(z, idx, Address::times_4, 0));
7576
7577 add2_with_carry(carry2, tmp3, tmp4, carry);
7578
7579 movl(Address(z, idx, Address::times_4, 0), tmp3);
7580 shrq(tmp3, 32);
7581
7582 shlq(carry2, 32);
7583 orq(tmp3, carry2);
7584 movq(carry, tmp3);
7585
7586 bind(L_post_third_loop_done);
7587 }
7588
7589 /**
7590 * Code for BigInteger::multiplyToLen() intrinsic.
7591 *
7592 * rdi: x
7593 * rax: xlen
7594 * rsi: y
7595 * rcx: ylen
7596 * r8: z
7597 * r11: tmp0
7598 * r12: tmp1
7599 * r13: tmp2
7600 * r14: tmp3
7601 * r15: tmp4
7602 * rbx: tmp5
7603 *
7604 */
7605 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
7606 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7607 ShortBranchVerifier sbv(this);
7608 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7609
7610 push(tmp0);
7611 push(tmp1);
7612 push(tmp2);
7613 push(tmp3);
7614 push(tmp4);
7615 push(tmp5);
7616
7617 push(xlen);
7618
7619 const Register idx = tmp1;
7620 const Register kdx = tmp2;
7621 const Register xstart = tmp3;
7622
7623 const Register y_idx = tmp4;
7624 const Register carry = tmp5;
7625 const Register product = xlen;
7626 const Register x_xstart = tmp0;
7627
7628 // First Loop.
7629 //
7630 // final static long LONG_MASK = 0xffffffffL;
7631 // int xstart = xlen - 1;
7632 // int ystart = ylen - 1;
7633 // long carry = 0;
// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7635 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7636 // z[kdx] = (int)product;
7637 // carry = product >>> 32;
7638 // }
7639 // z[xstart] = (int)carry;
7640 //
7641
7642 movl(idx, ylen); // idx = ylen;
7643 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
7644 xorq(carry, carry); // carry = 0;
7645
7646 Label L_done;
7647
7648 movl(xstart, xlen);
7649 decrementl(xstart);
7650 jcc(Assembler::negative, L_done);
7651
7652 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7653
7654 Label L_second_loop;
7655 testl(kdx, kdx);
7656 jcc(Assembler::zero, L_second_loop);
7657
7658 Label L_carry;
7659 subl(kdx, 1);
7660 jcc(Assembler::zero, L_carry);
7661
7662 movl(Address(z, kdx, Address::times_4, 0), carry);
7663 shrq(carry, 32);
7664 subl(kdx, 1);
7665
7666 bind(L_carry);
7667 movl(Address(z, kdx, Address::times_4, 0), carry);
7668
7669 // Second and third (nested) loops.
7670 //
7671 // for (int i = xstart-1; i >= 0; i--) { // Second loop
7672 // carry = 0;
7673 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7674 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7675 // (z[k] & LONG_MASK) + carry;
7676 // z[k] = (int)product;
7677 // carry = product >>> 32;
7678 // }
7679 // z[i] = (int)carry;
7680 // }
7681 //
7682 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7683
7684 const Register jdx = tmp1;
7685
7686 bind(L_second_loop);
7687 xorl(carry, carry); // carry = 0;
7688 movl(jdx, ylen); // j = ystart+1
7689
7690 subl(xstart, 1); // i = xstart-1;
7691 jcc(Assembler::negative, L_done);
7692
7693 push (z);
7694
7695 Label L_last_x;
7696 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7697 subl(xstart, 1); // i = xstart-1;
7698 jcc(Assembler::negative, L_last_x);
7699
7700 if (UseBMI2Instructions) {
7701 movq(rdx, Address(x, xstart, Address::times_4, 0));
7702 rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7703 } else {
7704 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7705 rorq(x_xstart, 32); // convert big-endian to little-endian
7706 }
7707
7708 Label L_third_loop_prologue;
7709 bind(L_third_loop_prologue);
7710
7711 push (x);
7712 push (xstart);
7713 push (ylen);
7714
7715
7716 if (UseBMI2Instructions) {
7717 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7718 } else { // !UseBMI2Instructions
7719 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7720 }
7721
7722 pop(ylen);
7723 pop(xlen);
7724 pop(x);
7725 pop(z);
7726
7727 movl(tmp3, xlen);
7728 addl(tmp3, 1);
7729 movl(Address(z, tmp3, Address::times_4, 0), carry);
7730 subl(tmp3, 1);
7731 jccb(Assembler::negative, L_done);
7732
7733 shrq(carry, 32);
7734 movl(Address(z, tmp3, Address::times_4, 0), carry);
7735 jmp(L_second_loop);
7736
// The following infrequently executed code is moved outside the loops.
7738 bind(L_last_x);
7739 if (UseBMI2Instructions) {
7740 movl(rdx, Address(x, 0));
7741 } else {
7742 movl(x_xstart, Address(x, 0));
7743 }
7744 jmp(L_third_loop_prologue);
7745
7746 bind(L_done);
7747
7748 pop(xlen);
7749
7750 pop(tmp5);
7751 pop(tmp4);
7752 pop(tmp3);
7753 pop(tmp2);
7754 pop(tmp1);
7755 pop(tmp0);
7756 }
7757
7758 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
7759 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
7760 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
7761 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
7762 Label VECTOR8_TAIL, VECTOR4_TAIL;
7763 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
7764 Label SAME_TILL_END, DONE;
7765 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
7766
// scale is in rcx in both Win64 and Unix
7768 ShortBranchVerifier sbv(this);
7769
7770 shlq(length);
7771 xorq(result, result);
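// Implicit-cl note (sketch): the one-operand shlq/shrq forms shift by cl,
// i.e. by log2_array_indxscale in rcx: length is scaled from elements to
// bytes here, and result is scaled back from a byte index to an element
// index before returning.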
7772
7773 if ((AVX3Threshold == 0) && (UseAVX > 2) &&
7774 VM_Version::supports_avx512vlbw()) {
7775 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
7776
7777 cmpq(length, 64);
7778 jcc(Assembler::less, VECTOR32_TAIL);
7779
7780 movq(tmp1, length);
7781 andq(tmp1, 0x3F); // tail count
7782 andq(length, ~(0x3F)); //vector count
7783
7784 bind(VECTOR64_LOOP);
7785 // AVX512 code to compare 64 byte vectors.
7786 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
7787 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
7788 kortestql(k7, k7);
7789 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
7790 addq(result, 64);
7791 subq(length, 64);
7792 jccb(Assembler::notZero, VECTOR64_LOOP);
7793
7795 testq(tmp1, tmp1);
7796 jcc(Assembler::zero, SAME_TILL_END);
7797
7798 //bind(VECTOR64_TAIL);
7799 // AVX512 code to compare up to 63 byte vectors.
7800 mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
7801 shlxq(tmp2, tmp2, tmp1);
7802 notq(tmp2);
7803 kmovql(k3, tmp2);
7804
7805 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
7806 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
7807
7808 ktestql(k7, k3);
jcc(Assembler::below, SAME_TILL_END); // no mismatch
7810
7811 bind(VECTOR64_NOT_EQUAL);
7812 kmovql(tmp1, k7);
7813 notq(tmp1);
7814 tzcntq(tmp1, tmp1);
7815 addq(result, tmp1);
7816 shrq(result);
7817 jmp(DONE);
7818 bind(VECTOR32_TAIL);
7819 }
7820
7821 cmpq(length, 8);
7822 jcc(Assembler::equal, VECTOR8_LOOP);
7823 jcc(Assembler::less, VECTOR4_TAIL);
7824
7825 if (UseAVX >= 2) {
7826 Label VECTOR16_TAIL, VECTOR32_LOOP;
7827
7828 cmpq(length, 16);
7829 jcc(Assembler::equal, VECTOR16_LOOP);
7830 jcc(Assembler::less, VECTOR8_LOOP);
7831
7832 cmpq(length, 32);
7833 jccb(Assembler::less, VECTOR16_TAIL);
7834
7835 subq(length, 32);
7836 bind(VECTOR32_LOOP);
7837 vmovdqu(rymm0, Address(obja, result));
7838 vmovdqu(rymm1, Address(objb, result));
7839 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
7840 vptest(rymm2, rymm2);
7841 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
7842 addq(result, 32);
7843 subq(length, 32);
7844 jcc(Assembler::greaterEqual, VECTOR32_LOOP);
7845 addq(length, 32);
7846 jcc(Assembler::equal, SAME_TILL_END);
// falling through if less than 32 bytes left; close the branch here
7848
7849 bind(VECTOR16_TAIL);
7850 cmpq(length, 16);
7851 jccb(Assembler::less, VECTOR8_TAIL);
7852 bind(VECTOR16_LOOP);
7853 movdqu(rymm0, Address(obja, result));
7854 movdqu(rymm1, Address(objb, result));
7855 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
7856 ptest(rymm2, rymm2);
7857 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7858 addq(result, 16);
7859 subq(length, 16);
7860 jcc(Assembler::equal, SAME_TILL_END);
7861 //falling through if less than 16 bytes left
7862 } else {//regular intrinsics
7863
7864 cmpq(length, 16);
7865 jccb(Assembler::less, VECTOR8_TAIL);
7866
7867 subq(length, 16);
7868 bind(VECTOR16_LOOP);
7869 movdqu(rymm0, Address(obja, result));
7870 movdqu(rymm1, Address(objb, result));
7871 pxor(rymm0, rymm1);
7872 ptest(rymm0, rymm0);
7873 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7874 addq(result, 16);
7875 subq(length, 16);
7876 jccb(Assembler::greaterEqual, VECTOR16_LOOP);
7877 addq(length, 16);
7878 jcc(Assembler::equal, SAME_TILL_END);
7879 //falling through if less than 16 bytes left
7880 }
7881
7882 bind(VECTOR8_TAIL);
7883 cmpq(length, 8);
7884 jccb(Assembler::less, VECTOR4_TAIL);
7885 bind(VECTOR8_LOOP);
7886 movq(tmp1, Address(obja, result));
7887 movq(tmp2, Address(objb, result));
7888 xorq(tmp1, tmp2);
7889 testq(tmp1, tmp1);
7890 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
7891 addq(result, 8);
7892 subq(length, 8);
7893 jcc(Assembler::equal, SAME_TILL_END);
7894 //falling through if less than 8 bytes left
7895
7896 bind(VECTOR4_TAIL);
7897 cmpq(length, 4);
7898 jccb(Assembler::less, BYTES_TAIL);
7899 bind(VECTOR4_LOOP);
7900 movl(tmp1, Address(obja, result));
7901 xorl(tmp1, Address(objb, result));
7902 testl(tmp1, tmp1);
7903 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
7904 addq(result, 4);
7905 subq(length, 4);
7906 jcc(Assembler::equal, SAME_TILL_END);
7907 //falling through if less than 4 bytes left
7908
7909 bind(BYTES_TAIL);
7910 bind(BYTES_LOOP);
7911 load_unsigned_byte(tmp1, Address(obja, result));
7912 load_unsigned_byte(tmp2, Address(objb, result));
7913 xorl(tmp1, tmp2);
7914 testl(tmp1, tmp1);
7915 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7916 decq(length);
7917 jcc(Assembler::zero, SAME_TILL_END);
7918 incq(result);
7919 load_unsigned_byte(tmp1, Address(obja, result));
7920 load_unsigned_byte(tmp2, Address(objb, result));
7921 xorl(tmp1, tmp2);
7922 testl(tmp1, tmp1);
7923 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7924 decq(length);
7925 jcc(Assembler::zero, SAME_TILL_END);
7926 incq(result);
7927 load_unsigned_byte(tmp1, Address(obja, result));
7928 load_unsigned_byte(tmp2, Address(objb, result));
7929 xorl(tmp1, tmp2);
7930 testl(tmp1, tmp1);
7931 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7932 jmp(SAME_TILL_END);
7933
7934 if (UseAVX >= 2) {
7935 bind(VECTOR32_NOT_EQUAL);
7936 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
7937 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
7938 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
7939 vpmovmskb(tmp1, rymm0);
7940 bsfq(tmp1, tmp1);
7941 addq(result, tmp1);
7942 shrq(result);
7943 jmp(DONE);
7944 }
7945
7946 bind(VECTOR16_NOT_EQUAL);
7947 if (UseAVX >= 2) {
7948 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
7949 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
7950 pxor(rymm0, rymm2);
7951 } else {
7952 pcmpeqb(rymm2, rymm2);
7953 pxor(rymm0, rymm1);
7954 pcmpeqb(rymm0, rymm1);
7955 pxor(rymm0, rymm2);
7956 }
7957 pmovmskb(tmp1, rymm0);
7958 bsfq(tmp1, tmp1);
7959 addq(result, tmp1);
7960 shrq(result);
7961 jmpb(DONE);
7962
7963 bind(VECTOR8_NOT_EQUAL);
7964 bind(VECTOR4_NOT_EQUAL);
7965 bsfq(tmp1, tmp1);
7966 shrq(tmp1, 3);
7967 addq(result, tmp1);
7968 bind(BYTES_NOT_EQUAL);
7969 shrq(result);
7970 jmpb(DONE);
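// Index recovery (sketch): bsfq finds the lowest differing bit of the XOR,
// shrq(tmp1, 3) converts that bit position to a byte offset, and the final
// shrq(result) (by cl) converts the byte index back to an element index.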
7971
7972 bind(SAME_TILL_END);
7973 mov64(result, -1);
7974
7975 bind(DONE);
7976 }
7977
7978 //Helper functions for square_to_len()
7979
7980 /**
7981 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
7982 * Preserves x and z and modifies rest of the registers.
7983 */
7984 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7985 // Perform square and right shift by 1
7986 // Handle odd xlen case first, then for even xlen do the following
7987 // jlong carry = 0;
7988 // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
7989 // huge_128 product = x[j:j+1] * x[j:j+1];
7990 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
7991 // z[i+2:i+3] = (jlong)(product >>> 1);
7992 // carry = (jlong)product;
7993 // }
7994
7995 xorq(tmp5, tmp5); // carry
7996 xorq(rdxReg, rdxReg);
7997 xorl(tmp1, tmp1); // index for x
7998 xorl(tmp4, tmp4); // index for z
7999
8000 Label L_first_loop, L_first_loop_exit;
8001
8002 testl(xlen, 1);
8003 jccb(Assembler::zero, L_first_loop); //jump if xlen is even
8004
8005 // Square and right shift by 1 the odd element using 32 bit multiply
8006 movl(raxReg, Address(x, tmp1, Address::times_4, 0));
8007 imulq(raxReg, raxReg);
8008 shrq(raxReg, 1);
8009 adcq(tmp5, 0);
8010 movq(Address(z, tmp4, Address::times_4, 0), raxReg);
8011 incrementl(tmp1);
8012 addl(tmp4, 2);
8013
8014 // Square and right shift by 1 the rest using 64 bit multiply
8015 bind(L_first_loop);
8016 cmpptr(tmp1, xlen);
8017 jccb(Assembler::equal, L_first_loop_exit);
8018
8019 // Square
8020 movq(raxReg, Address(x, tmp1, Address::times_4, 0));
8021 rorq(raxReg, 32); // convert big-endian to little-endian
8022 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax
8023
8024 // Right shift by 1 and save carry
8025 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
8026 rcrq(rdxReg, 1);
8027 rcrq(raxReg, 1);
8028 adcq(tmp5, 0);
8029
8030 // Store result in z
8031 movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
8032 movq(Address(z, tmp4, Address::times_4, 8), raxReg);
8033
8034 // Update indices for x and z
8035 addl(tmp1, 2);
8036 addl(tmp4, 4);
8037 jmp(L_first_loop);
8038
8039 bind(L_first_loop_exit);
8040 }
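
// Note on the overall scheme: square_to_len() below relies on the identity
//   x*x = (sum of x[i]^2 * B^(2i)) + 2 * (sum over i < j of x[i]*x[j] * B^(i+j))
// where B = 2^64 and x[i] are the 64-bit digits of x. The diagonal squares are
// stored pre-shifted right by one bit here, the off-diagonal products are then
// accumulated on top, and the final one-bit left shift (lshift_by_1) restores
// the doubled cross terms.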
8041
8042
8043 /**
8044 * Perform the following multiply add operation using BMI2 instructions
8045 * carry:sum = sum + op1*op2 + carry
8046 * op2 should be in rdx
8047 * op2 is preserved, all other registers are modified
8048 */
8049 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
// op2 is expected in rdx: mulxq multiplies its source by rdx implicitly
8051 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
8052 addq(sum, carry);
8053 adcq(tmp2, 0);
8054 addq(sum, op1);
8055 adcq(tmp2, 0);
8056 movq(carry, tmp2);
8057 }
8058
8059 /**
8060 * Perform the following multiply add operation:
8061 * carry:sum = sum + op1*op2 + carry
8062 * Preserves op1, op2 and modifies rest of registers
8063 */
8064 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
8065 // rdx:rax = op1 * op2
8066 movq(raxReg, op2);
8067 mulq(op1);
8068
8069 // rdx:rax = sum + carry + rdx:rax
8070 addq(sum, carry);
8071 adcq(rdxReg, 0);
8072 addq(sum, raxReg);
8073 adcq(rdxReg, 0);
8074
8075 // carry:sum = rdx:sum
8076 movq(carry, rdxReg);
8077 }
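
// Illustrative C equivalent of the two multiply_add_64 variants above
// (assumes a 128-bit unsigned type such as unsigned __int128):
//   unsigned __int128 t = (unsigned __int128)op1 * op2 + sum + carry;
//   sum   = (uint64_t)t;         // low 64 bits
//   carry = (uint64_t)(t >> 64); // high 64 bits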
8078
8079 /**
8080 * Add 64 bit long carry into z[] with carry propagation.
8081 * Preserves z and carry register values and modifies rest of registers.
8082 *
8083 */
8084 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
8085 Label L_fourth_loop, L_fourth_loop_exit;
8086
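// Equivalent pseudocode (illustrative; indices are 32-bit words, adds are 64-bit):
//   zlen -= 2;
//   z[zlen:zlen+1] += carry;
//   while (carry-out && (zlen -= 2) >= 0) {
//     z[zlen:zlen+1] += 1;
//   }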
8087 movl(tmp1, 1);
8088 subl(zlen, 2);
8089 addq(Address(z, zlen, Address::times_4, 0), carry);
8090
8091 bind(L_fourth_loop);
8092 jccb(Assembler::carryClear, L_fourth_loop_exit);
8093 subl(zlen, 2);
8094 jccb(Assembler::negative, L_fourth_loop_exit);
8095 addq(Address(z, zlen, Address::times_4, 0), tmp1);
8096 jmp(L_fourth_loop);
8097 bind(L_fourth_loop_exit);
8098 }
8099
8100 /**
8101 * Shift z[] left by 1 bit.
8102 * Preserves x, len, z and zlen registers and modifies rest of the registers.
8103 *
8104 */
8105 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
8106
8107 Label L_fifth_loop, L_fifth_loop_exit;
8108
8109 // Fifth loop
8110 // Perform primitiveLeftShift(z, zlen, 1)
8111
8112 const Register prev_carry = tmp1;
8113 const Register new_carry = tmp4;
8114 const Register value = tmp2;
8115 const Register zidx = tmp3;
8116
// int zidx, carry;
// long value;
// carry = 0;
// for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
//   (carry:value) = (z[zidx] << 1) | carry;
//   z[zidx] = value;
// }
8124
8125 movl(zidx, zlen);
8126 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
8127
8128 bind(L_fifth_loop);
8129 decl(zidx); // Use decl to preserve carry flag
8130 decl(zidx);
8131 jccb(Assembler::negative, L_fifth_loop_exit);
8132
8133 if (UseBMI2Instructions) {
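// rclq shifts value left by one, pulling the previous iteration's carry (CF)
// into bit 0 and leaving the shifted-out bit in CF for the next iteration;
// the decl pair above and rorxq below both leave CF untouched.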
8134 movq(value, Address(z, zidx, Address::times_4, 0));
8135 rclq(value, 1);
8136 rorxq(value, value, 32);
8137 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
8138 }
8139 else {
8140 // clear new_carry
8141 xorl(new_carry, new_carry);
8142
8143 // Shift z[i] by 1, or in previous carry and save new carry
8144 movq(value, Address(z, zidx, Address::times_4, 0));
8145 shlq(value, 1);
8146 adcl(new_carry, 0);
8147
8148 orq(value, prev_carry);
8149 rorq(value, 0x20);
8150 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
8151
8152 // Set previous carry = new carry
8153 movl(prev_carry, new_carry);
8154 }
8155 jmp(L_fifth_loop);
8156
8157 bind(L_fifth_loop_exit);
8158 }
8159
8160
8161 /**
8162 * Code for BigInteger::squareToLen() intrinsic
8163 *
8164 * rdi: x
8165 * rsi: len
8166 * r8: z
8167 * rcx: zlen
8168 * r12: tmp1
8169 * r13: tmp2
8170 * r14: tmp3
8171 * r15: tmp4
8172 * rbx: tmp5
8173 *
8174 */
8175 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8176
8177 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
8178 push(tmp1);
8179 push(tmp2);
8180 push(tmp3);
8181 push(tmp4);
8182 push(tmp5);
8183
8184 // First loop
8185 // Store the squares, right shifted one bit (i.e., divided by 2).
8186 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
8187
8188 // Add in off-diagonal sums.
8189 //
8190 // Second, third (nested) and fourth loops.
8191 // zlen +=2;
8192 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
8193 // carry = 0;
8194 // long op2 = x[xidx:xidx+1];
8195 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
8196 // k -= 2;
8197 // long op1 = x[j:j+1];
8198 // long sum = z[k:k+1];
8199 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
8200 // z[k:k+1] = sum;
8201 // }
8202 // add_one_64(z, k, carry, tmp_regs);
8203 // }
8204
8205 const Register carry = tmp5;
8206 const Register sum = tmp3;
8207 const Register op1 = tmp4;
8208 Register op2 = tmp2;
8209
8210 push(zlen);
8211 push(len);
8212 addl(zlen,2);
8213 bind(L_second_loop);
8214 xorq(carry, carry);
8215 subl(zlen, 4);
8216 subl(len, 2);
8217 push(zlen);
8218 push(len);
8219 cmpl(len, 0);
8220 jccb(Assembler::lessEqual, L_second_loop_exit);
8221
8222 // Multiply an array by one 64 bit long.
8223 if (UseBMI2Instructions) {
8224 op2 = rdxReg;
8225 movq(op2, Address(x, len, Address::times_4, 0));
8226 rorxq(op2, op2, 32);
8227 }
8228 else {
8229 movq(op2, Address(x, len, Address::times_4, 0));
8230 rorq(op2, 32);
8231 }
8232
8233 bind(L_third_loop);
8234 decrementl(len);
8235 jccb(Assembler::negative, L_third_loop_exit);
8236 decrementl(len);
8237 jccb(Assembler::negative, L_last_x);
8238
8239 movq(op1, Address(x, len, Address::times_4, 0));
8240 rorq(op1, 32);
8241
8242 bind(L_multiply);
8243 subl(zlen, 2);
8244 movq(sum, Address(z, zlen, Address::times_4, 0));
8245
// Multiply 64-bit by 64-bit; the lower 64 bits of the product are added into sum
// and the upper 64 bits become the carry.
8247 if (UseBMI2Instructions) {
8248 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
8249 }
8250 else {
8251 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8252 }
8253
8254 movq(Address(z, zlen, Address::times_4, 0), sum);
8255
8256 jmp(L_third_loop);
8257 bind(L_third_loop_exit);
8258
8259 // Fourth loop
8260 // Add 64 bit long carry into z with carry propagation.
// Uses the zlen value as adjusted by the loops above.
8262 add_one_64(z, zlen, carry, tmp1);
8263
8264 pop(len);
8265 pop(zlen);
8266 jmp(L_second_loop);
8267
// The following infrequent code is moved out of the loops.
8269 bind(L_last_x);
8270 movl(op1, Address(x, 0));
8271 jmp(L_multiply);
8272
8273 bind(L_second_loop_exit);
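// Two pop pairs: the first discards the len/zlen values pushed by the
// iteration that failed the loop test, the second restores the originals
// pushed before L_second_loop.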
8274 pop(len);
8275 pop(zlen);
8276 pop(len);
8277 pop(zlen);
8278
8279 // Fifth loop
8280 // Shift z left 1 bit.
8281 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
8282
8283 // z[zlen-1] |= x[len-1] & 1;
8284 movl(tmp3, Address(x, len, Address::times_4, -4));
8285 andl(tmp3, 1);
8286 orl(Address(z, zlen, Address::times_4, -4), tmp3);
8287
8288 pop(tmp5);
8289 pop(tmp4);
8290 pop(tmp3);
8291 pop(tmp2);
8292 pop(tmp1);
8293 }
8294
8295 /**
* Helper function for mul_add().
* Multiplies in[] by the int k and adds the products to out[] starting at
* offset offs, using 128-bit by 32-bit multiplies; the carry is returned in tmp5.
* Only a multiple-of-four count of in[] elements is processed here; the
* caller handles any remainder.
* k is in rdxReg when BMI2 instructions are used, otherwise in tmp2.
* This function preserves the out, in and k registers.
* len and offset index the current positions in in[] and out[] respectively.
* tmp5 holds the carry; the other registers are temporaries and are modified.
8305 *
8306 */
8307 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
8308 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
8309 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8310
8311 Label L_first_loop, L_first_loop_exit;
8312
8313 movl(tmp1, len);
8314 shrl(tmp1, 2);
8315
8316 bind(L_first_loop);
8317 subl(tmp1, 1);
8318 jccb(Assembler::negative, L_first_loop_exit);
8319
8320 subl(len, 4);
8321 subl(offset, 4);
8322
8323 Register op2 = tmp2;
8324 const Register sum = tmp3;
8325 const Register op1 = tmp4;
8326 const Register carry = tmp5;
8327
8328 if (UseBMI2Instructions) {
8329 op2 = rdxReg;
8330 }
8331
8332 movq(op1, Address(in, len, Address::times_4, 8));
8333 rorq(op1, 32);
8334 movq(sum, Address(out, offset, Address::times_4, 8));
8335 rorq(sum, 32);
8336 if (UseBMI2Instructions) {
8337 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8338 }
8339 else {
8340 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8341 }
8342 // Store back in big endian from little endian
8343 rorq(sum, 0x20);
8344 movq(Address(out, offset, Address::times_4, 8), sum);
8345
8346 movq(op1, Address(in, len, Address::times_4, 0));
8347 rorq(op1, 32);
8348 movq(sum, Address(out, offset, Address::times_4, 0));
8349 rorq(sum, 32);
8350 if (UseBMI2Instructions) {
8351 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8352 }
8353 else {
8354 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8355 }
8356 // Store back in big endian from little endian
8357 rorq(sum, 0x20);
8358 movq(Address(out, offset, Address::times_4, 0), sum);
8359
8360 jmp(L_first_loop);
8361 bind(L_first_loop_exit);
8362 }
8363
8364 /**
8365 * Code for BigInteger::mulAdd() intrinsic
8366 *
8367 * rdi: out
8368 * rsi: in
8369 * r11: offs (out.length - offset)
8370 * rcx: len
8371 * r8: k
8372 * r12: tmp1
8373 * r13: tmp2
8374 * r14: tmp3
8375 * r15: tmp4
8376 * rbx: tmp5
* Multiply in[] by the word k, add to out[], and return the carry in rax
8378 */
8379 void MacroAssembler::mul_add(Register out, Register in, Register offs,
8380 Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
8381 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8382
8383 Label L_carry, L_last_in, L_done;
8384
8385 // carry = 0;
8386 // for (int j=len-1; j >= 0; j--) {
8387 // long product = (in[j] & LONG_MASK) * kLong +
8388 // (out[offs] & LONG_MASK) + carry;
8389 // out[offs--] = (int)product;
8390 // carry = product >>> 32;
8391 // }
8392 //
8393 push(tmp1);
8394 push(tmp2);
8395 push(tmp3);
8396 push(tmp4);
8397 push(tmp5);
8398
8399 Register op2 = tmp2;
8400 const Register sum = tmp3;
8401 const Register op1 = tmp4;
8402 const Register carry = tmp5;
8403
if (UseBMI2Instructions) {
op2 = rdxReg;
}
movl(op2, k);
8411
8412 xorq(carry, carry);
8413
// First loop

// Multiply in[] by k in a 4-way unrolled loop using 128-bit by 32-bit multiplies.
// The carry is in tmp5.
8418 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
8419
// Multiply any trailing pair of in[] entries using a 64-bit by 32-bit multiply
8421 decrementl(len);
8422 jccb(Assembler::negative, L_carry);
8423 decrementl(len);
8424 jccb(Assembler::negative, L_last_in);
8425
8426 movq(op1, Address(in, len, Address::times_4, 0));
8427 rorq(op1, 32);
8428
8429 subl(offs, 2);
8430 movq(sum, Address(out, offs, Address::times_4, 0));
8431 rorq(sum, 32);
8432
8433 if (UseBMI2Instructions) {
8434 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8435 }
8436 else {
8437 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8438 }
8439
8440 // Store back in big endian from little endian
8441 rorq(sum, 0x20);
8442 movq(Address(out, offs, Address::times_4, 0), sum);
8443
8444 testl(len, len);
8445 jccb(Assembler::zero, L_carry);
8446
8447 //Multiply the last in[] entry, if any
8448 bind(L_last_in);
8449 movl(op1, Address(in, 0));
8450 movl(sum, Address(out, offs, Address::times_4, -4));
8451
8452 movl(raxReg, k);
mull(op1); // op1 * eax -> edx:eax
8454 addl(sum, carry);
8455 adcl(rdxReg, 0);
8456 addl(sum, raxReg);
8457 adcl(rdxReg, 0);
8458 movl(carry, rdxReg);
8459
8460 movl(Address(out, offs, Address::times_4, -4), sum);
8461
8462 bind(L_carry);
// return the carry (tmp5) in rax
8464 movl(rax, carry);
8465
8466 bind(L_done);
8467 pop(tmp5);
8468 pop(tmp4);
8469 pop(tmp3);
8470 pop(tmp2);
8471 pop(tmp1);
8472 }
8473
8474 /**
8475 * Emits code to update CRC-32 with a byte value according to constants in table
8476 *
* @param [in,out] crc   Register containing the crc.
* @param [in]     val   Register containing the byte to fold into the CRC.
* @param [in]     table Register containing the table of crc constants.
8480 *
8481 * uint32_t crc;
8482 * val = crc_table[(val ^ crc) & 0xFF];
8483 * crc = val ^ (crc >> 8);
8484 *
8485 */
8486 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
8487 xorl(val, crc);
8488 andl(val, 0xFF);
8489 shrl(crc, 8); // unsigned shift
8490 xorl(crc, Address(table, val, Address::times_4, 0));
8491 }
8492
8493 /**
8494 * Fold 128-bit data chunk
8495 */
8496 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
8497 if (UseAVX > 0) {
vpclmulhdq(xtmp, xK, xcrc); // [127:64]
8499 vpclmulldq(xcrc, xK, xcrc); // [63:0]
8500 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
8501 pxor(xcrc, xtmp);
8502 } else {
8503 movdqa(xtmp, xcrc);
pclmulhdq(xtmp, xK); // [127:64]
8505 pclmulldq(xcrc, xK); // [63:0]
8506 pxor(xcrc, xtmp);
8507 movdqu(xtmp, Address(buf, offset));
8508 pxor(xcrc, xtmp);
8509 }
8510 }
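
// In GF(2) polynomial (carry-less) arithmetic the fold step above computes
//   crc' = crc[127:64] * K_hi  ^  crc[63:0] * K_lo  ^  (next 128 bits of buf)
// which multiplies the running remainder by x^128 modulo the CRC polynomial
// while absorbing the next data chunk.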
8511
8512 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
8513 if (UseAVX > 0) {
8514 vpclmulhdq(xtmp, xK, xcrc);
8515 vpclmulldq(xcrc, xK, xcrc);
8516 pxor(xcrc, xbuf);
8517 pxor(xcrc, xtmp);
8518 } else {
8519 movdqa(xtmp, xcrc);
8520 pclmulhdq(xtmp, xK);
8521 pclmulldq(xcrc, xK);
8522 pxor(xcrc, xbuf);
8523 pxor(xcrc, xtmp);
8524 }
8525 }
8526
8527 /**
8528 * 8-bit folds to compute 32-bit CRC
8529 *
8530 * uint64_t xcrc;
8531 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
8532 */
8533 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
8534 movdl(tmp, xcrc);
8535 andl(tmp, 0xFF);
8536 movdl(xtmp, Address(table, tmp, Address::times_4, 0));
8537 psrldq(xcrc, 1); // unsigned shift one byte
8538 pxor(xcrc, xtmp);
8539 }
8540
8541 /**
8542 * uint32_t crc;
8543 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
8544 */
8545 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
8546 movl(tmp, crc);
8547 andl(tmp, 0xFF);
8548 shrl(crc, 8);
8549 xorl(crc, Address(table, tmp, Address::times_4, 0));
8550 }
8551
8552 /**
8553 * @param crc register containing existing CRC (32-bit)
8554 * @param buf register pointing to input byte buffer (byte*)
8555 * @param len register containing number of bytes
8556 * @param table register that will contain address of CRC table
8557 * @param tmp scratch register
8558 */
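// Outline: process bytes one at a time until buf is 16-byte aligned; fold
// 512 bits per iteration across four parallel 128-bit streams, then reduce
// 512 -> 128 bits; finally reduce 128 -> 32 bits with eight 8-bit
// table-driven folds and handle the remaining < 16 bytes byte by byte.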
8559 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
8560 assert_different_registers(crc, buf, len, table, tmp, rax);
8561
8562 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8563 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8564
8565 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8566 // context for the registers used, where all instructions below are using 128-bit mode
8567 // On EVEX without VL and BW, these instructions will all be AVX.
8568 lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
8569 notl(crc); // ~crc
8570 cmpl(len, 16);
8571 jcc(Assembler::less, L_tail);
8572
8573 // Align buffer to 16 bytes
8574 movl(tmp, buf);
8575 andl(tmp, 0xF);
8576 jccb(Assembler::zero, L_aligned);
8577 subl(tmp, 16);
8578 addl(len, tmp);
8579
8580 align(4);
8581 BIND(L_align_loop);
8582 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8583 update_byte_crc32(crc, rax, table);
8584 increment(buf);
8585 incrementl(tmp);
8586 jccb(Assembler::less, L_align_loop);
8587
8588 BIND(L_aligned);
8589 movl(tmp, len); // save
8590 shrl(len, 4);
8591 jcc(Assembler::zero, L_tail_restore);
8592
8593 // Fold crc into first bytes of vector
8594 movdqa(xmm1, Address(buf, 0));
8595 movdl(rax, xmm1);
8596 xorl(crc, rax);
8597 if (VM_Version::supports_sse4_1()) {
8598 pinsrd(xmm1, crc, 0);
8599 } else {
8600 pinsrw(xmm1, crc, 0);
8601 shrl(crc, 16);
8602 pinsrw(xmm1, crc, 1);
8603 }
8604 addptr(buf, 16);
8605 subl(len, 4); // len > 0
8606 jcc(Assembler::less, L_fold_tail);
8607
8608 movdqa(xmm2, Address(buf, 0));
8609 movdqa(xmm3, Address(buf, 16));
8610 movdqa(xmm4, Address(buf, 32));
8611 addptr(buf, 48);
8612 subl(len, 3);
8613 jcc(Assembler::lessEqual, L_fold_512b);
8614
8615 // Fold total 512 bits of polynomial on each iteration,
8616 // 128 bits per each of 4 parallel streams.
8617 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
8618
8619 align32();
8620 BIND(L_fold_512b_loop);
8621 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8622 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
8623 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
8624 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
8625 addptr(buf, 64);
8626 subl(len, 4);
8627 jcc(Assembler::greater, L_fold_512b_loop);
8628
8629 // Fold 512 bits to 128 bits.
8630 BIND(L_fold_512b);
8631 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8632 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
8633 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
8634 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
8635
// Fold the remaining 128-bit data chunks
8637 BIND(L_fold_tail);
8638 addl(len, 3);
8639 jccb(Assembler::lessEqual, L_fold_128b);
8640 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8641
8642 BIND(L_fold_tail_loop);
8643 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8644 addptr(buf, 16);
8645 decrementl(len);
8646 jccb(Assembler::greater, L_fold_tail_loop);
8647
8648 // Fold 128 bits in xmm1 down into 32 bits in crc register.
8649 BIND(L_fold_128b);
8650 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
8651 if (UseAVX > 0) {
8652 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8653 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
8654 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8655 } else {
8656 movdqa(xmm2, xmm0);
8657 pclmulqdq(xmm2, xmm1, 0x1);
8658 movdqa(xmm3, xmm0);
8659 pand(xmm3, xmm2);
8660 pclmulqdq(xmm0, xmm3, 0x1);
8661 }
8662 psrldq(xmm1, 8);
8663 psrldq(xmm2, 4);
8664 pxor(xmm0, xmm1);
8665 pxor(xmm0, xmm2);
8666
8667 // 8 8-bit folds to compute 32-bit CRC.
8668 for (int j = 0; j < 4; j++) {
8669 fold_8bit_crc32(xmm0, table, xmm1, rax);
8670 }
8671 movdl(crc, xmm0); // mov 32 bits to general register
8672 for (int j = 0; j < 4; j++) {
8673 fold_8bit_crc32(crc, table, rax);
8674 }
8675
8676 BIND(L_tail_restore);
8677 movl(len, tmp); // restore
8678 BIND(L_tail);
8679 andl(len, 0xf);
8680 jccb(Assembler::zero, L_exit);
8681
// Fold the remaining bytes
8683 align(4);
8684 BIND(L_tail_loop);
8685 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8686 update_byte_crc32(crc, rax, table);
8687 increment(buf);
8688 decrementl(len);
8689 jccb(Assembler::greater, L_tail_loop);
8690
8691 BIND(L_exit);
notl(crc); // ~crc
8693 }
8694
8695 // Helper function for AVX 512 CRC32
8696 // Fold 512-bit data chunks
8697 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
8698 Register pos, int offset) {
8699 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
8700 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
8701 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
8702 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
8703 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
8704 }
8705
8706 // Helper function for AVX 512 CRC32
8707 // Compute CRC32 for < 256B buffers
8708 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
8709 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
8710 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
8711
8712 Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
8713 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
8714 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
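
// Dispatch summary: inputs of >= 32 bytes re-enter the shared
// L_16B_reduction_loop; 17..31 bytes go through L_get_last_two_xmms; shorter
// inputs are staged in a zeroed 16-byte stack slot and shifted into position
// with pshufb via a length-indexed shuffle table, so the shared reduction and
// Barrett code can be reused.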
8715
8716 // check if there is enough buffer to be able to fold 16B at a time
8717 cmpl(len, 32);
8718 jcc(Assembler::less, L_less_than_32);
8719
8720 // if there is, load the constants
8721 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10
8722 movdl(xmm0, crc); // get the initial crc value
8723 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8724 pxor(xmm7, xmm0);
8725
8726 // update the buffer pointer
8727 addl(pos, 16);
// update the counter; subtract 32 instead of 16 to save one instruction in the loop
8729 subl(len, 32);
8730 jmp(L_16B_reduction_loop);
8731
8732 bind(L_less_than_32);
// move the initial crc into the return value; this is necessary for zero-length buffers
8734 movl(rax, crc);
8735 testl(len, len);
8736 jcc(Assembler::equal, L_cleanup);
8737
8738 movdl(xmm0, crc); //get the initial crc value
8739
8740 cmpl(len, 16);
8741 jcc(Assembler::equal, L_exact_16_left);
8742 jcc(Assembler::less, L_less_than_16_left);
8743
8744 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8745 pxor(xmm7, xmm0); //xor the initial crc value
8746 addl(pos, 16);
8747 subl(len, 16);
8748 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
8749 jmp(L_get_last_two_xmms);
8750
8751 bind(L_less_than_16_left);
// use stack space to load data shorter than 16 bytes; zero out the 16B in memory first
8753 pxor(xmm1, xmm1);
8754 movptr(tmp1, rsp);
8755 movdqu(Address(tmp1, 0 * 16), xmm1);
8756
8757 cmpl(len, 4);
8758 jcc(Assembler::less, L_only_less_than_4);
8759
// back up the counter value
8761 movl(tmp2, len);
8762 cmpl(len, 8);
8763 jcc(Assembler::less, L_less_than_8_left);
8764
8765 //load 8 Bytes
8766 movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
8767 movq(Address(tmp1, 0 * 16), rax);
8768 addptr(tmp1, 8);
8769 subl(len, 8);
8770 addl(pos, 8);
8771
8772 bind(L_less_than_8_left);
8773 cmpl(len, 4);
8774 jcc(Assembler::less, L_less_than_4_left);
8775
8776 //load 4 Bytes
8777 movl(rax, Address(buf, pos, Address::times_1, 0));
8778 movl(Address(tmp1, 0 * 16), rax);
8779 addptr(tmp1, 4);
8780 subl(len, 4);
8781 addl(pos, 4);
8782
8783 bind(L_less_than_4_left);
8784 cmpl(len, 2);
8785 jcc(Assembler::less, L_less_than_2_left);
8786
8787 // load 2 Bytes
8788 movw(rax, Address(buf, pos, Address::times_1, 0));
8789 movl(Address(tmp1, 0 * 16), rax);
8790 addptr(tmp1, 2);
8791 subl(len, 2);
8792 addl(pos, 2);
8793
8794 bind(L_less_than_2_left);
8795 cmpl(len, 1);
8796 jcc(Assembler::less, L_zero_left);
8797
8798 // load 1 Byte
8799 movb(rax, Address(buf, pos, Address::times_1, 0));
8800 movb(Address(tmp1, 0 * 16), rax);
8801
8802 bind(L_zero_left);
8803 movdqu(xmm7, Address(rsp, 0));
8804 pxor(xmm7, xmm0); //xor the initial crc value
8805
8806 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8807 movdqu(xmm0, Address(rax, tmp2));
8808 pshufb(xmm7, xmm0);
8809 jmp(L_128_done);
8810
8811 bind(L_exact_16_left);
8812 movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
8813 pxor(xmm7, xmm0); //xor the initial crc value
8814 jmp(L_128_done);
8815
8816 bind(L_only_less_than_4);
8817 cmpl(len, 3);
8818 jcc(Assembler::less, L_only_less_than_3);
8819
8820 // load 3 Bytes
8821 movb(rax, Address(buf, pos, Address::times_1, 0));
8822 movb(Address(tmp1, 0), rax);
8823
8824 movb(rax, Address(buf, pos, Address::times_1, 1));
8825 movb(Address(tmp1, 1), rax);
8826
8827 movb(rax, Address(buf, pos, Address::times_1, 2));
8828 movb(Address(tmp1, 2), rax);
8829
8830 movdqu(xmm7, Address(rsp, 0));
8831 pxor(xmm7, xmm0); //xor the initial crc value
8832
8833 pslldq(xmm7, 0x5);
8834 jmp(L_barrett);
8835 bind(L_only_less_than_3);
8836 cmpl(len, 2);
8837 jcc(Assembler::less, L_only_less_than_2);
8838
8839 // load 2 Bytes
8840 movb(rax, Address(buf, pos, Address::times_1, 0));
8841 movb(Address(tmp1, 0), rax);
8842
8843 movb(rax, Address(buf, pos, Address::times_1, 1));
8844 movb(Address(tmp1, 1), rax);
8845
8846 movdqu(xmm7, Address(rsp, 0));
8847 pxor(xmm7, xmm0); //xor the initial crc value
8848
8849 pslldq(xmm7, 0x6);
8850 jmp(L_barrett);
8851
8852 bind(L_only_less_than_2);
8853 //load 1 Byte
8854 movb(rax, Address(buf, pos, Address::times_1, 0));
8855 movb(Address(tmp1, 0), rax);
8856
8857 movdqu(xmm7, Address(rsp, 0));
8858 pxor(xmm7, xmm0); //xor the initial crc value
8859
8860 pslldq(xmm7, 0x7);
8861 }
8862
8863 /**
8864 * Compute CRC32 using AVX512 instructions
* @param crc register containing existing CRC (32-bit)
* @param buf register pointing to input byte buffer (byte*)
* @param len register containing number of bytes
* @param table address of crc or crc32c table
* @param tmp1 scratch register
* @param tmp2 scratch register
* @return rax result register
8872 *
8873 * This routine is identical for crc32c with the exception of the precomputed constant
8874 * table which will be passed as the table argument. The calculation steps are
8875 * the same for both variants.
8876 */
8877 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
8878 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
8879
8880 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8881 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8882 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
8883 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
8884 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
8885
8886 const Register pos = r12;
8887 push(r12);
8888 subptr(rsp, 16 * 2 + 8);
8889
8890 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8891 // context for the registers used, where all instructions below are using 128-bit mode
8892 // On EVEX without VL and BW, these instructions will all be AVX.
8893 movl(pos, 0);
8894
8895 // check if smaller than 256B
8896 cmpl(len, 256);
8897 jcc(Assembler::less, L_less_than_256);
8898
8899 // load the initial crc value
8900 movdl(xmm10, crc);
8901
// load the first two 64B chunks and xor the initial crc value into the first
8903 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
8904 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
8905 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
8906 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
8907
8908 subl(len, 256);
8909 cmpl(len, 256);
8910 jcc(Assembler::less, L_fold_128_B_loop);
8911
8912 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
8913 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
8914 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
8915 subl(len, 256);
8916
8917 bind(L_fold_256_B_loop);
8918 addl(pos, 256);
8919 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
8920 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
8921 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
8922 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
8923
8924 subl(len, 256);
8925 jcc(Assembler::greaterEqual, L_fold_256_B_loop);
8926
8927 // Fold 256 into 128
8928 addl(pos, 256);
8929 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
8930 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
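// vpternlogq imm8 0x96 is a three-way XOR: dst = dst ^ src2 ^ src3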
8931 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
8932
8933 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
8934 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
8935 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
8936
8937 evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
8938 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
8939
8940 addl(len, 128);
8941 jmp(L_fold_128_B_register);
8942
// At this point there are 128 * x + y (0 <= y < 128) bytes of buffer. The
// fold_128_B_loop will fold 128B at a time until 128 + y bytes of buffer remain.

// Fold 128B at a time. This section of the code folds two zmm registers in parallel.
8947 bind(L_fold_128_B_loop);
8948 addl(pos, 128);
8949 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
8950 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
8951
8952 subl(len, 128);
8953 jcc(Assembler::greaterEqual, L_fold_128_B_loop);
8954
8955 addl(pos, 128);
8956
// at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128
// the 128B of folded data is held in two zmm registers: zmm0 and zmm4
8959 bind(L_fold_128_B_register);
8960 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
8961 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
8962 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
8963 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
// save the last 128-bit lane of zmm4, which has no multiplicand (its rk slots are zero)
8965 vextracti64x2(xmm7, xmm4, 3);
8966
8967 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
8968 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
8969 // Needed later in reduction loop
8970 movdqu(xmm10, Address(table, 1 * 16));
8971 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
8972 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
8973
8974 // Swap 1,0,3,2 - 01 00 11 10
8975 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
8976 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
8977 vextracti128(xmm5, xmm8, 1);
8978 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
8979
// instead of 128, we add 128 - 16 to the loop counter to save one instruction in the loop
// instead of a cmp instruction, we use the sign flag with the jl instruction
8982 addl(len, 128 - 16);
8983 jcc(Assembler::less, L_final_reduction_for_128);
8984
8985 bind(L_16B_reduction_loop);
8986 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8987 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8988 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8989 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
8990 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8991 addl(pos, 16);
8992 subl(len, 16);
8993 jcc(Assembler::greaterEqual, L_16B_reduction_loop);
8994
8995 bind(L_final_reduction_for_128);
8996 addl(len, 16);
8997 jcc(Assembler::equal, L_128_done);
8998
8999 bind(L_get_last_two_xmms);
9000 movdqu(xmm2, xmm7);
9001 addl(pos, len);
9002 movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
9003 subl(pos, len);
9004
9005 // get rid of the extra data that was loaded before
9006 // load the shift constant
9007 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
9008 movdqu(xmm0, Address(rax, len));
9009 addl(rax, len);
9010
9011 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
9012 //Change mask to 512
9013 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
9014 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
9015
9016 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
9017 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
9018 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
9019 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
9020 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
9021
9022 bind(L_128_done);
9023 // compute crc of a 128-bit value
9024 movdqu(xmm10, Address(table, 3 * 16));
9025 movdqu(xmm0, xmm7);
9026
9027 // 64b fold
9028 vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
9029 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
9030 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
9031
9032 // 32b fold
9033 movdqu(xmm0, xmm7);
9034 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
9035 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
9036 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
9037 jmp(L_barrett);
9038
9039 bind(L_less_than_256);
9040 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
9041
// Barrett reduction
9043 bind(L_barrett);
9044 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
9045 movdqu(xmm1, xmm7);
9046 movdqu(xmm2, xmm7);
9047 movdqu(xmm10, Address(table, 4 * 16));
9048
9049 pclmulqdq(xmm7, xmm10, 0x0);
9050 pxor(xmm7, xmm2);
9051 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
9052 movdqu(xmm2, xmm7);
9053 pclmulqdq(xmm7, xmm10, 0x10);
9054 pxor(xmm7, xmm2);
9055 pxor(xmm7, xmm1);
9056 pextrd(crc, xmm7, 2);
9057
9058 bind(L_cleanup);
9059 addptr(rsp, 16 * 2 + 8);
9060 pop(r12);
9061 }
9062
9063 // S. Gueron / Information Processing Letters 112 (2012) 184
9064 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
9065 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
9066 // Output: the 64-bit carry-less product of B * CONST
9067 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
9068 Register tmp1, Register tmp2, Register tmp3) {
9069 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
9070 if (n > 0) {
9071 addq(tmp3, n * 256 * 8);
9072 }
9073 // Q1 = TABLEExt[n][B & 0xFF];
9074 movl(tmp1, in);
9075 andl(tmp1, 0x000000FF);
9076 shll(tmp1, 3);
9077 addq(tmp1, tmp3);
9078 movq(tmp1, Address(tmp1, 0));
9079
9080 // Q2 = TABLEExt[n][B >> 8 & 0xFF];
9081 movl(tmp2, in);
9082 shrl(tmp2, 8);
9083 andl(tmp2, 0x000000FF);
9084 shll(tmp2, 3);
9085 addq(tmp2, tmp3);
9086 movq(tmp2, Address(tmp2, 0));
9087
9088 shlq(tmp2, 8);
9089 xorq(tmp1, tmp2);
9090
9091 // Q3 = TABLEExt[n][B >> 16 & 0xFF];
9092 movl(tmp2, in);
9093 shrl(tmp2, 16);
9094 andl(tmp2, 0x000000FF);
9095 shll(tmp2, 3);
9096 addq(tmp2, tmp3);
9097 movq(tmp2, Address(tmp2, 0));
9098
9099 shlq(tmp2, 16);
9100 xorq(tmp1, tmp2);
9101
9102 // Q4 = TABLEExt[n][B >> 24 & 0xFF];
9103 shrl(in, 24);
9104 andl(in, 0x000000FF);
9105 shll(in, 3);
9106 addq(in, tmp3);
9107 movq(in, Address(in, 0));
9108
9109 shlq(in, 24);
9110 xorq(in, tmp1);
9111 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
9112 }
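
// Illustrative C sketch of the table-driven path above (TABLEExt denotes the
// precomputed table at StubRoutines::crc32c_table_addr(), 256 64-bit entries
// per chunk index n):
//   uint64_t Q = 0;
//   for (int i = 0; i < 4; i++) {
//     Q ^= TABLEExt[n][(B >> (8 * i)) & 0xFF] << (8 * i);
//   }
//   return Q;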
9113
9114 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
9115 Register in_out,
9116 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
9117 XMMRegister w_xtmp2,
9118 Register tmp1,
9119 Register n_tmp2, Register n_tmp3) {
9120 if (is_pclmulqdq_supported) {
9121 movdl(w_xtmp1, in_out); // modified blindly
9122
9123 movl(tmp1, const_or_pre_comp_const_index);
9124 movdl(w_xtmp2, tmp1);
9125 pclmulqdq(w_xtmp1, w_xtmp2, 0);
9126
9127 movdq(in_out, w_xtmp1);
9128 } else {
9129 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
9130 }
9131 }
9132
9133 // Recombination Alternative 2: No bit-reflections
9134 // T1 = (CRC_A * U1) << 1
9135 // T2 = (CRC_B * U2) << 1
9136 // C1 = T1 >> 32
9137 // C2 = T2 >> 32
9138 // T1 = T1 & 0xFFFFFFFF
9139 // T2 = T2 & 0xFFFFFFFF
9140 // T1 = CRC32(0, T1)
9141 // T2 = CRC32(0, T2)
9142 // C1 = C1 ^ T1
9143 // C2 = C2 ^ T2
9144 // CRC = C1 ^ C2 ^ CRC_C
9145 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
9146 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9147 Register tmp1, Register tmp2,
9148 Register n_tmp3) {
9149 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
9150 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
9151 shlq(in_out, 1);
9152 movl(tmp1, in_out);
9153 shrq(in_out, 32);
9154 xorl(tmp2, tmp2);
9155 crc32(tmp2, tmp1, 4);
9156 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
9157 shlq(in1, 1);
9158 movl(tmp1, in1);
9159 shrq(in1, 32);
9160 xorl(tmp2, tmp2);
9161 crc32(tmp2, tmp1, 4);
9162 xorl(in1, tmp2);
9163 xorl(in_out, in1);
9164 xorl(in_out, in2);
9165 }
9166
// Set N to a predefined value
// Subtract it from the length of the buffer
// Execute in a loop:
9170 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
9171 // for i = 1 to N do
9172 // CRC_A = CRC32(CRC_A, A[i])
9173 // CRC_B = CRC32(CRC_B, B[i])
9174 // CRC_C = CRC32(CRC_C, C[i])
9175 // end for
9176 // Recombine
9177 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
9178 Register in_out1, Register in_out2, Register in_out3,
9179 Register tmp1, Register tmp2, Register tmp3,
9180 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9181 Register tmp4, Register tmp5,
9182 Register n_tmp6) {
9183 Label L_processPartitions;
9184 Label L_processPartition;
9185 Label L_exit;
9186
9187 bind(L_processPartitions);
9188 cmpl(in_out1, 3 * size);
9189 jcc(Assembler::less, L_exit);
9190 xorl(tmp1, tmp1);
9191 xorl(tmp2, tmp2);
9192 movq(tmp3, in_out2);
9193 addq(tmp3, size);
9194
9195 bind(L_processPartition);
9196 crc32(in_out3, Address(in_out2, 0), 8);
9197 crc32(tmp1, Address(in_out2, size), 8);
9198 crc32(tmp2, Address(in_out2, size * 2), 8);
9199 addq(in_out2, 8);
9200 cmpq(in_out2, tmp3);
9201 jcc(Assembler::less, L_processPartition);
9202 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
9203 w_xtmp1, w_xtmp2, w_xtmp3,
9204 tmp4, tmp5,
9205 n_tmp6);
9206 addq(in_out2, 2 * size);
9207 subl(in_out1, 3 * size);
9208 jmp(L_processPartitions);
9209
9210 bind(L_exit);
9211 }
9212
9213 // Algorithm 2: Pipelined usage of the CRC32 instruction.
9214 // Input: A buffer I of L bytes.
9215 // Output: the CRC32C value of the buffer.
9216 // Notations:
9217 // Write L = 24N + r, with N = floor (L/24).
9218 // r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation A|B|C|R, where A, B and C each consist of
// N quadwords, and R consists of r bytes.
9221 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
9222 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
9223 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
9224 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
9225 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
9226 Register tmp1, Register tmp2, Register tmp3,
9227 Register tmp4, Register tmp5, Register tmp6,
9228 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9229 bool is_pclmulqdq_supported) {
9230 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
9231 Label L_wordByWord;
9232 Label L_byteByByteProlog;
9233 Label L_byteByByte;
9234 Label L_exit;
9235
if (is_pclmulqdq_supported) {
9237 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
9238 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
9239
9240 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
9241 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
9242
9243 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
9244 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
9245 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
9246 } else {
9247 const_or_pre_comp_const_index[0] = 1;
9248 const_or_pre_comp_const_index[1] = 0;
9249
9250 const_or_pre_comp_const_index[2] = 3;
9251 const_or_pre_comp_const_index[3] = 2;
9252
9253 const_or_pre_comp_const_index[4] = 5;
9254 const_or_pre_comp_const_index[5] = 4;
9255 }
9256 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
9257 in2, in1, in_out,
9258 tmp1, tmp2, tmp3,
9259 w_xtmp1, w_xtmp2, w_xtmp3,
9260 tmp4, tmp5,
9261 tmp6);
9262 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
9263 in2, in1, in_out,
9264 tmp1, tmp2, tmp3,
9265 w_xtmp1, w_xtmp2, w_xtmp3,
9266 tmp4, tmp5,
9267 tmp6);
9268 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
9269 in2, in1, in_out,
9270 tmp1, tmp2, tmp3,
9271 w_xtmp1, w_xtmp2, w_xtmp3,
9272 tmp4, tmp5,
9273 tmp6);
9274 movl(tmp1, in2);
9275 andl(tmp1, 0x00000007);
9276 negl(tmp1);
9277 addl(tmp1, in2);
9278 addq(tmp1, in1);
9279
9280 cmpq(in1, tmp1);
9281 jccb(Assembler::greaterEqual, L_byteByByteProlog);
9282 align(16);
9283 BIND(L_wordByWord);
9284 crc32(in_out, Address(in1, 0), 8);
9285 addq(in1, 8);
9286 cmpq(in1, tmp1);
9287 jcc(Assembler::less, L_wordByWord);
9288
9289 BIND(L_byteByByteProlog);
9290 andl(in2, 0x00000007);
9291 movl(tmp2, 1);
9292
9293 cmpl(tmp2, in2);
9294 jccb(Assembler::greater, L_exit);
9295 BIND(L_byteByByte);
9296 crc32(in_out, Address(in1, 0), 1);
9297 incq(in1);
9298 incl(tmp2);
9299 cmpl(tmp2, in2);
9300 jcc(Assembler::lessEqual, L_byteByByte);
9301
9302 BIND(L_exit);
9303 }
9304 #undef BIND
9305 #undef BLOCK_COMMENT
9306
9307 // Compress char[] array to byte[].
9308 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Returns the array length if every element in the array can be encoded;
// otherwise, returns the index of the first non-latin1 (> 0xff) character.
9311 // @IntrinsicCandidate
9312 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
9313 // for (int i = 0; i < len; i++) {
9314 // char c = src[srcOff];
9315 // if (c > 0xff) {
9316 // return i; // return index of non-latin1 char
9317 // }
9318 // dst[dstOff] = (byte)c;
9319 // srcOff++;
9320 // dstOff++;
9321 // }
9322 // return len;
9323 // }
9324 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
9325 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
9326 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
9327 Register tmp5, Register result, KRegister mask1, KRegister mask2) {
9328 Label copy_chars_loop, done, reset_sp, copy_tail;
9329
9330 // rsi: src
9331 // rdi: dst
9332 // rdx: len
9333 // rcx: tmp5
9334 // rax: result
9335
9336 // rsi holds start addr of source char[] to be compressed
9337 // rdi holds start addr of destination byte[]
9338 // rdx holds length
9339
9340 assert(len != result, "");
9341
9342 // save length for return
9343 movl(result, len);
9344
9345 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
9346 VM_Version::supports_avx512vlbw() &&
9347 VM_Version::supports_bmi2()) {
9348
9349 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
9350
9351 // alignment
9352 Label post_alignment;
9353
// if the length of the string is less than 32, handle it the old-fashioned way
9355 testl(len, -32);
9356 jcc(Assembler::zero, below_threshold);
9357
9358 // First check whether a character is compressible ( <= 0xFF).
9359 // Create mask to test for Unicode chars inside zmm vector
9360 movl(tmp5, 0x00FF);
9361 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
9362
9363 testl(len, -64);
9364 jccb(Assembler::zero, post_alignment);
9365
9366 movl(tmp5, dst);
9367 andl(tmp5, (32 - 1));
9368 negl(tmp5);
9369 andl(tmp5, (32 - 1));
9370
9371 // bail out when there is nothing to be done
9372 testl(tmp5, 0xFFFFFFFF);
9373 jccb(Assembler::zero, post_alignment);
9374
// ~(~0 << tmp5), where tmp5 is the # of elements to process for dst alignment
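// e.g. tmp5 == 5 gives mask2 == 0x1F (low 5 word lanes enabled)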
9376 movl(len, 0xFFFFFFFF);
9377 shlxl(len, len, tmp5);
9378 notl(len);
9379 kmovdl(mask2, len);
9380 movl(len, result);
9381
9382 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9383 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9384 ktestd(mask1, mask2);
9385 jcc(Assembler::carryClear, copy_tail);
9386
9387 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9388
9389 addptr(src, tmp5);
9390 addptr(src, tmp5);
9391 addptr(dst, tmp5);
9392 subl(len, tmp5);
9393
9394 bind(post_alignment);
9395 // end of alignment
9396
9397 movl(tmp5, len);
9398 andl(tmp5, (32 - 1)); // tail count (in chars)
9399 andl(len, ~(32 - 1)); // vector count (in chars)
9400 jccb(Assembler::zero, copy_loop_tail);
9401
9402 lea(src, Address(src, len, Address::times_2));
9403 lea(dst, Address(dst, len, Address::times_1));
9404 negptr(len);
9405
9406 bind(copy_32_loop);
9407 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
9408 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
9409 kortestdl(mask1, mask1);
9410 jccb(Assembler::carryClear, reset_for_copy_tail);
9411
// All elements in the current chunk are valid candidates for
// compression. Write the truncated byte elements to memory.
9414 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
9415 addptr(len, 32);
9416 jccb(Assembler::notZero, copy_32_loop);
9417
9418 bind(copy_loop_tail);
9419 // bail out when there is nothing to be done
9420 testl(tmp5, 0xFFFFFFFF);
9421 jcc(Assembler::zero, done);
9422
9423 movl(len, tmp5);
9424
9425 // ~(~0 << len), where len is the # of remaining elements to process
9426 movl(tmp5, 0xFFFFFFFF);
9427 shlxl(tmp5, tmp5, len);
9428 notl(tmp5);
9429
9430 kmovdl(mask2, tmp5);
9431
9432 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9433 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9434 ktestd(mask1, mask2);
9435 jcc(Assembler::carryClear, copy_tail);
9436
9437 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9438 jmp(done);
9439
9440 bind(reset_for_copy_tail);
9441 lea(src, Address(src, tmp5, Address::times_2));
9442 lea(dst, Address(dst, tmp5, Address::times_1));
9443 subptr(len, tmp5);
9444 jmp(copy_chars_loop);
9445
9446 bind(below_threshold);
9447 }
9448
9449 if (UseSSE42Intrinsics) {
9450 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
9451
9452 // vectored compression
9453 testl(len, 0xfffffff8);
9454 jcc(Assembler::zero, copy_tail);
9455
9456 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors
9457 movdl(tmp1Reg, tmp5);
9458 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg
9459
9460 andl(len, 0xfffffff0);
9461 jccb(Assembler::zero, copy_16);
9462
9463 // compress 16 chars per iter
9464 pxor(tmp4Reg, tmp4Reg);
9465
9466 lea(src, Address(src, len, Address::times_2));
9467 lea(dst, Address(dst, len, Address::times_1));
9468 negptr(len);
9469
9470 bind(copy_32_loop);
9471 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters
9472 por(tmp4Reg, tmp2Reg);
9473 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
9474 por(tmp4Reg, tmp3Reg);
9475 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector
9476 jccb(Assembler::notZero, reset_for_copy_tail);
packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
9478 movdqu(Address(dst, len, Address::times_1), tmp2Reg);
9479 addptr(len, 16);
9480 jccb(Assembler::notZero, copy_32_loop);
9481
9482 // compress next vector of 8 chars (if any)
9483 bind(copy_16);
9484 // len = 0
9485 testl(result, 0x00000008); // check if there's a block of 8 chars to compress
9486 jccb(Assembler::zero, copy_tail_sse);
9487
9488 pxor(tmp3Reg, tmp3Reg);
9489
9490 movdqu(tmp2Reg, Address(src, 0));
9491 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
9492 jccb(Assembler::notZero, reset_for_copy_tail);
9493 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
9494 movq(Address(dst, 0), tmp2Reg);
9495 addptr(src, 16);
9496 addptr(dst, 8);
9497 jmpb(copy_tail_sse);
9498
9499 bind(reset_for_copy_tail);
9500 movl(tmp5, result);
9501 andl(tmp5, 0x0000000f);
9502 lea(src, Address(src, tmp5, Address::times_2));
9503 lea(dst, Address(dst, tmp5, Address::times_1));
9504 subptr(len, tmp5);
9505 jmpb(copy_chars_loop);
9506
9507 bind(copy_tail_sse);
9508 movl(len, result);
9509 andl(len, 0x00000007); // tail count (in chars)
9510 }
9511 // compress 1 char per iter
9512 bind(copy_tail);
9513 testl(len, len);
9514 jccb(Assembler::zero, done);
9515 lea(src, Address(src, len, Address::times_2));
9516 lea(dst, Address(dst, len, Address::times_1));
9517 negptr(len);
9518
9519 bind(copy_chars_loop);
9520 load_unsigned_short(tmp5, Address(src, len, Address::times_2));
9521 testl(tmp5, 0xff00); // check if Unicode char
9522 jccb(Assembler::notZero, reset_sp);
movb(Address(dst, len, Address::times_1), tmp5); // latin1 char; compress to 1 byte
9524 increment(len);
9525 jccb(Assembler::notZero, copy_chars_loop);
9526
9527 // add len then return (len will be zero if compress succeeded, otherwise negative)
9528 bind(reset_sp);
9529 addl(result, len);
9530
9531 bind(done);
9532 }
9533
9534 // Inflate byte[] array to char[].
// ../jdk/src/java.base/share/classes/java/lang/StringLatin1.java
9536 // @IntrinsicCandidate
9537 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
9538 // for (int i = 0; i < len; i++) {
9539 // dst[dstOff++] = (char)(src[srcOff++] & 0xff);
9540 // }
9541 // }
9542 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
9543 XMMRegister tmp1, Register tmp2, KRegister mask) {
9544 Label copy_chars_loop, done, below_threshold, avx3_threshold;
9545 // rsi: src
9546 // rdi: dst
9547 // rdx: len
9548 // rcx: tmp2
9549
9550 // rsi holds start addr of source byte[] to be inflated
9551 // rdi holds start addr of destination char[]
9552 // rdx holds length
9553 assert_different_registers(src, dst, len, tmp2);
9554 movl(tmp2, len);
9555 if ((UseAVX > 2) && // AVX512
9556 VM_Version::supports_avx512vlbw() &&
9557 VM_Version::supports_bmi2()) {
9558
9559 Label copy_32_loop, copy_tail;
9560 Register tmp3_aliased = len;
9561
// if the length of the string is less than 16, handle it the old-fashioned way
9563 testl(len, -16);
9564 jcc(Assembler::zero, below_threshold);
9565
9566 testl(len, -1 * AVX3Threshold);
9567 jcc(Assembler::zero, avx3_threshold);
9568
9569 // In order to use only one arithmetic operation for the main loop we use
9570 // this pre-calculation
9571 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
9572 andl(len, -32); // vector count
9573 jccb(Assembler::zero, copy_tail);
9574
9575 lea(src, Address(src, len, Address::times_1));
9576 lea(dst, Address(dst, len, Address::times_2));
9577 negptr(len);
9578
9579
9580 // inflate 32 chars per iter
9581 bind(copy_32_loop);
9582 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
9583 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
9584 addptr(len, 32);
9585 jcc(Assembler::notZero, copy_32_loop);
9586
9587 bind(copy_tail);
9588 // bail out when there is nothing to be done
9589 testl(tmp2, -1); // we don't destroy the contents of tmp2 here
9590 jcc(Assembler::zero, done);
9591
// ~(~0 << tmp2), where tmp2 holds the # of remaining elements to process
9593 movl(tmp3_aliased, -1);
9594 shlxl(tmp3_aliased, tmp3_aliased, tmp2);
9595 notl(tmp3_aliased);
9596 kmovdl(mask, tmp3_aliased);
9597 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
9598 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
9599
9600 jmp(done);
9601 bind(avx3_threshold);
9602 }
9603 if (UseSSE42Intrinsics) {
9604 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
9605
9606 if (UseAVX > 1) {
9607 andl(tmp2, (16 - 1));
9608 andl(len, -16);
9609 jccb(Assembler::zero, copy_new_tail);
9610 } else {
9611 andl(tmp2, 0x00000007); // tail count (in chars)
9612 andl(len, 0xfffffff8); // vector count (in chars)
9613 jccb(Assembler::zero, copy_tail);
9614 }
9615
9616 // vectored inflation
9617 lea(src, Address(src, len, Address::times_1));
9618 lea(dst, Address(dst, len, Address::times_2));
9619 negptr(len);
9620
9621 if (UseAVX > 1) {
9622 bind(copy_16_loop);
9623 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
9624 vmovdqu(Address(dst, len, Address::times_2), tmp1);
9625 addptr(len, 16);
9626 jcc(Assembler::notZero, copy_16_loop);
9627
9628 bind(below_threshold);
9629 bind(copy_new_tail);
9630 movl(len, tmp2);
9631 andl(tmp2, 0x00000007);
9632 andl(len, 0xFFFFFFF8);
9633 jccb(Assembler::zero, copy_tail);
9634
9635 pmovzxbw(tmp1, Address(src, 0));
9636 movdqu(Address(dst, 0), tmp1);
9637 addptr(src, 8);
9638 addptr(dst, 2 * 8);
9639
9640 jmp(copy_tail, true);
9641 }
9642
9643 // inflate 8 chars per iter
9644 bind(copy_8_loop);
9645 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words
9646 movdqu(Address(dst, len, Address::times_2), tmp1);
9647 addptr(len, 8);
9648 jcc(Assembler::notZero, copy_8_loop);
9649
9650 bind(copy_tail);
9651 movl(len, tmp2);
9652
9653 cmpl(len, 4);
9654 jccb(Assembler::less, copy_bytes);
9655
9656 movdl(tmp1, Address(src, 0)); // load 4 byte chars
9657 pmovzxbw(tmp1, tmp1);
9658 movq(Address(dst, 0), tmp1);
9659 subptr(len, 4);
9660 addptr(src, 4);
9661 addptr(dst, 8);
9662
9663 bind(copy_bytes);
9664 } else {
9665 bind(below_threshold);
9666 }
9667
9668 testl(len, len);
9669 jccb(Assembler::zero, done);
9670 lea(src, Address(src, len, Address::times_1));
9671 lea(dst, Address(dst, len, Address::times_2));
9672 negptr(len);
9673
9674 // inflate 1 char per iter
9675 bind(copy_chars_loop);
9676 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char
9677 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word
9678 increment(len);
9679 jcc(Assembler::notZero, copy_chars_loop);
9680
9681 bind(done);
9682 }
9683
void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
    case T_BOOLEAN:
      evmovdqub(dst, kmask, src, merge, vector_len);
      break;
    case T_CHAR:
    case T_SHORT:
      evmovdquw(dst, kmask, src, merge, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evmovdqul(dst, kmask, src, merge, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evmovdquq(dst, kmask, src, merge, vector_len);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}


void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
    case T_BOOLEAN:
      evmovdqub(dst, kmask, src, merge, vector_len);
      break;
    case T_CHAR:
    case T_SHORT:
      evmovdquw(dst, kmask, src, merge, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evmovdqul(dst, kmask, src, merge, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evmovdquq(dst, kmask, src, merge, vector_len);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
    case T_BOOLEAN:
      evmovdqub(dst, kmask, src, merge, vector_len);
      break;
    case T_CHAR:
    case T_SHORT:
      evmovdquw(dst, kmask, src, merge, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evmovdqul(dst, kmask, src, merge, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evmovdquq(dst, kmask, src, merge, vector_len);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

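// Bitwise NOT of a mask register, clipped to masklen bits. knotbl flips all
// eight bits of a byte mask, so for the sub-byte lengths (2 and 4) the result
// is ANDed with (1 << masklen) - 1 (i.e. 3 or 15) to clear the bits beyond
// the mask length.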
void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
  switch(masklen) {
    case 2:
      knotbl(dst, src);
      movl(rtmp, 3);
      kmovbl(ktmp, rtmp);
      kandbl(dst, ktmp, dst);
      break;
    case 4:
      knotbl(dst, src);
      movl(rtmp, 15);
      kmovbl(ktmp, rtmp);
      kandbl(dst, ktmp, dst);
      break;
    case 8:
      knotbl(dst, src);
      break;
    case 16:
      knotwl(dst, src);
      break;
    case 32:
      knotdl(dst, src);
      break;
    case 64:
      knotql(dst, src);
      break;
    default:
      fatal("Unexpected vector length %d", masklen);
      break;
  }
}

void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      kandbl(dst, src1, src2);
      break;
    case T_CHAR:
    case T_SHORT:
      kandwl(dst, src1, src2);
      break;
    case T_INT:
    case T_FLOAT:
      kanddl(dst, src1, src2);
      break;
    case T_LONG:
    case T_DOUBLE:
      kandql(dst, src1, src2);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      korbl(dst, src1, src2);
      break;
    case T_CHAR:
    case T_SHORT:
      korwl(dst, src1, src2);
      break;
    case T_INT:
    case T_FLOAT:
      kordl(dst, src1, src2);
      break;
    case T_LONG:
    case T_DOUBLE:
      korql(dst, src1, src2);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      kxorbl(dst, src1, src2);
      break;
    case T_CHAR:
    case T_SHORT:
      kxorwl(dst, src1, src2);
      break;
    case T_INT:
    case T_FLOAT:
      kxordl(dst, src1, src2);
      break;
    case T_LONG:
    case T_DOUBLE:
      kxorql(dst, src1, src2);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      evpermb(dst, mask, nds, src, merge, vector_len); break;
    case T_CHAR:
    case T_SHORT:
      evpermw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
    case T_FLOAT:
      evpermd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
    case T_DOUBLE:
      evpermq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      evpermb(dst, mask, nds, src, merge, vector_len); break;
    case T_CHAR:
    case T_SHORT:
      evpermw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
    case T_FLOAT:
      evpermd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
    case T_DOUBLE:
      evpermq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpminub(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpminuw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpminud(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpminuq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpmaxub(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpmaxud(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpminub(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpminuw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpminud(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpminuq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpmaxub(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpmaxud(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpminsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpminsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpminsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpminsq(dst, mask, nds, src, merge, vector_len); break;
    case T_FLOAT:
      evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
    case T_DOUBLE:
      evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
    case T_FLOAT:
      evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
    case T_DOUBLE:
      evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpminsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpminsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpminsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpminsq(dst, mask, nds, src, merge, vector_len); break;
    case T_FLOAT:
      evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
    case T_DOUBLE:
      evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
    case T_FLOAT:
      evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
    case T_DOUBLE:
      evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpxord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpxorq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpxord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpxorq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evporq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evporq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpandd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpandq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpandd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpandq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
  switch(masklen) {
    case 8:
      kortestbl(src1, src2);
      break;
    case 16:
      kortestwl(src1, src2);
      break;
    case 32:
      kortestdl(src1, src2);
      break;
    case 64:
      kortestql(src1, src2);
      break;
    default:
      fatal("Unexpected mask length %d", masklen);
      break;
  }
}


void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
  switch(masklen) {
    case 8:
      ktestbl(src1, src2);
      break;
    case 16:
      ktestwl(src1, src2);
      break;
    case 32:
      ktestdl(src1, src2);
      break;
    case 64:
      ktestql(src1, src2);
      break;
    default:
      fatal("Unexpected mask length %d", masklen);
      break;
  }
}

void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprold(dst, mask, src, shift, merge, vlen_enc); break;
    case T_LONG:
      evprolq(dst, mask, src, shift, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprord(dst, mask, src, shift, merge, vlen_enc); break;
    case T_LONG:
      evprorq(dst, mask, src, shift, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
    case T_LONG:
      evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
    case T_LONG:
      evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

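// AddressLiteral wrappers: if the literal is RIP-reachable it is used as a
// memory operand directly; otherwise its address is first materialized into
// rscratch with lea. Callers must supply a scratch register unless the
// literal is always reachable, which the asserts below enforce.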
void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    evpandq(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    evpandq(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    evporq(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    evporq(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vpshufb(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vpshufb(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpor(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src3), "missing");

  if (reachable(src3)) {
    vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
  } else {
    lea(rscratch, src3);
    vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
  }
}

#if COMPILER2_OR_JVMCI

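// Predicated fill of the trailing "length" elements: bzhiq keeps the low
// "length" bits of an all-ones value (e.g. length == 5 yields 0b11111), and
// the result is used as the store mask for a merging masked vector store.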
void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                                 Register length, Register temp, int vec_enc) {
  // Computing mask for predicated vector store.
  movptr(temp, -1);
  bzhiq(temp, temp, length);
  kmov(mask, temp);
  evmovdqu(bt, mask, dst, xmm, true, vec_enc);
}

// Set memory operation for length "less than" 64 bytes.
void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
                                   XMMRegister xmm, KRegister mask, Register length,
                                   Register temp, bool use64byteVector) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG };
  if (!use64byteVector) {
    fill32(dst, disp, xmm);
    subptr(length, 32 >> shift);
    fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
  } else {
    assert(MaxVectorSize == 64, "vector length != 64");
    fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
  }
}


void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
                                   XMMRegister xmm, KRegister mask, Register length,
                                   Register temp) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG };
  fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
}


void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  vmovdqu(dst, xmm);
}

void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
  fill32(Address(dst, disp), xmm);
}

void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  if (!use64byteVector) {
    fill32(dst, xmm);
    fill32(dst.plus_disp(32), xmm);
  } else {
    evmovdquq(dst, xmm, Assembler::AVX_512bit);
  }
}

void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
  fill64(Address(dst, disp), xmm, use64byteVector);
}

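// AVX3 fill. Counts of up to 128 bytes are handled by a chain of masked and
// unmasked 32/64-byte stores; larger counts first store a masked prefix to
// reach 32-byte alignment and then fill 128 bytes per loop iteration. With
// MaxVectorSize == 64, counts above CopyAVX3Threshold take an equivalent
// ZMM sequence using 64-byte stores and a 192-byte loop.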
void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
                                        Register count, Register rtmp, XMMRegister xtmp) {
  Label L_exit;
  Label L_fill_start;
  Label L_fill_64_bytes;
  Label L_fill_96_bytes;
  Label L_fill_128_bytes;
  Label L_fill_128_bytes_loop;
  Label L_fill_128_loop_header;
  Label L_fill_128_bytes_loop_header;
  Label L_fill_128_bytes_loop_pre_header;
  Label L_fill_zmm_sequence;

  int shift = -1;
  switch(type) {
    case T_BYTE:  shift = 0;
      break;
    case T_SHORT: shift = 1;
      break;
    case T_INT:   shift = 2;
      break;
    /* Uncomment when LONG fill stubs are supported.
    case T_LONG:  shift = 3;
      break;
    */
    default:
      fatal("Unhandled type: %s\n", type2name(type));
  }

  if ((CopyAVX3Threshold != 0) || (MaxVectorSize == 32)) {

    if (MaxVectorSize == 64) {
      cmpq(count, CopyAVX3Threshold >> shift);
      jcc(Assembler::greater, L_fill_zmm_sequence);
    }

    evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);

    bind(L_fill_start);

    cmpq(count, 32 >> shift);
    jccb(Assembler::greater, L_fill_64_bytes);
    fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_64_bytes);
    cmpq(count, 64 >> shift);
    jccb(Assembler::greater, L_fill_96_bytes);
    fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_96_bytes);
    cmpq(count, 96 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes);
    fill64(to, 0, xtmp);
    subq(count, 64 >> shift);
    fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_128_bytes);
    cmpq(count, 128 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
    fill64(to, 0, xtmp);
    fill32(to, 64, xtmp);
    subq(count, 96 >> shift);
    fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_128_bytes_loop_pre_header);
    {
      mov(rtmp, to);
      andq(rtmp, 31);
      jccb(Assembler::zero, L_fill_128_bytes_loop_header);
      negq(rtmp);
      addq(rtmp, 32);
      mov64(r8, -1L);
      bzhiq(r8, r8, rtmp);
      kmovql(k2, r8);
      evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
      addq(to, rtmp);
      shrq(rtmp, shift);
      subq(count, rtmp);
    }

    cmpq(count, 128 >> shift);
    jcc(Assembler::less, L_fill_start);

    bind(L_fill_128_bytes_loop_header);
    subq(count, 128 >> shift);

    align32();
    bind(L_fill_128_bytes_loop);
    fill64(to, 0, xtmp);
    fill64(to, 64, xtmp);
    addq(to, 128);
    subq(count, 128 >> shift);
    jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);

    addq(count, 128 >> shift);
    jcc(Assembler::zero, L_exit);
    jmp(L_fill_start);
  }

  if (MaxVectorSize == 64) {
    // Sequence using 64 byte ZMM register.
    Label L_fill_128_bytes_zmm;
    Label L_fill_192_bytes_zmm;
    Label L_fill_192_bytes_loop_zmm;
    Label L_fill_192_bytes_loop_header_zmm;
    Label L_fill_192_bytes_loop_pre_header_zmm;
    Label L_fill_start_zmm_sequence;

    bind(L_fill_zmm_sequence);
    evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);

    bind(L_fill_start_zmm_sequence);
    cmpq(count, 64 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes_zmm);
    fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_128_bytes_zmm);
    cmpq(count, 128 >> shift);
    jccb(Assembler::greater, L_fill_192_bytes_zmm);
    fill64(to, 0, xtmp, true);
    subq(count, 64 >> shift);
    fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_192_bytes_zmm);
    cmpq(count, 192 >> shift);
    jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
    fill64(to, 0, xtmp, true);
    fill64(to, 64, xtmp, true);
    subq(count, 128 >> shift);
    fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_192_bytes_loop_pre_header_zmm);
    {
      movq(rtmp, to);
      andq(rtmp, 63);
      jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
      negq(rtmp);
      addq(rtmp, 64);
      mov64(r8, -1L);
      bzhiq(r8, r8, rtmp);
      kmovql(k2, r8);
      evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
      addq(to, rtmp);
      shrq(rtmp, shift);
      subq(count, rtmp);
    }

    cmpq(count, 192 >> shift);
    jcc(Assembler::less, L_fill_start_zmm_sequence);

    bind(L_fill_192_bytes_loop_header_zmm);
    subq(count, 192 >> shift);

    align32();
    bind(L_fill_192_bytes_loop_zmm);
    fill64(to, 0, xtmp, true);
    fill64(to, 64, xtmp, true);
    fill64(to, 128, xtmp, true);
    addq(to, 192);
    subq(count, 192 >> shift);
    jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);

    addq(count, 192 >> shift);
    jcc(Assembler::zero, L_exit);
    jmp(L_fill_start_zmm_sequence);
  }
  bind(L_exit);
}
#endif //COMPILER2_OR_JVMCI

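// Scalar float/double -> int/long conversions. cvttss2si and friends return
// the "integer indefinite" value (0x80000000 or 0x8000000000000000) on
// overflow and NaN instead of the saturating result required by the JLS, so
// that sentinel is checked for and a fixup stub applies the Java semantics.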
void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
  Label done;
  cvttss2sil(dst, src);
  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
  cmpl(dst, 0x80000000); // float_sign_flip
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
  Label done;
  cvttsd2sil(dst, src);
  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
  cmpl(dst, 0x80000000); // float_sign_flip
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
  Label done;
  cvttss2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
  pop(dst);
  bind(done);
}

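// Math.round(float) in outline: with e = biased exponent of src, compute
// shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS) - e. When shift is in [0, 31]
// the result is a plain shift-and-round: take the significand with the
// implicit bit or'ed in, negate it for negative inputs, and compute
// ((r >> shift) + 1) >> 1 (round half up). Otherwise (shift & -32 is
// non-zero) the value is too large, too small, or NaN, and the plain f2i
// conversion already yields the rounded result, hence the L_special_case
// fallback. round_double is the 64-bit analogue.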
void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding
  // algorithm; please refer to java.lang.Math.round(float) for details.
  const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
  const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
  const int32_t FloatConsts_EXP_BIAS = 127;
  const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
  const int32_t MINUS_32 = 0xFFFFFFE0;
  Label L_special_case, L_block1, L_exit;
  movl(rtmp, FloatConsts_EXP_BIT_MASK);
  movdl(dst, src);
  andl(dst, rtmp);
  sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
  movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
  subl(rtmp, dst);
  movl(rcx, rtmp);
  movl(dst, MINUS_32);
  testl(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movdl(dst, src);
  andl(dst, FloatConsts_SIGNIF_BIT_MASK);
  orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
  movdl(rtmp, src);
  testl(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negl(dst);
  bind(L_block1);
  sarl(dst);
  addl(dst, 0x1);
  sarl(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_f2i(dst, src);
  bind(L_exit);
}

void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding
  // algorithm; please refer to java.lang.Math.round(double) for details.
  const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
  const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
  const int64_t DoubleConsts_EXP_BIAS = 1023;
  const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
  const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
  Label L_special_case, L_block1, L_exit;
  mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
  movq(dst, src);
  andq(dst, rtmp);
  sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
  mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
  subq(rtmp, dst);
  movq(rcx, rtmp);
  mov64(dst, MINUS_64);
  testq(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movq(dst, src);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
  andq(dst, rtmp);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
  orq(dst, rtmp);
  movq(rtmp, src);
  testq(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negq(dst);
  bind(L_block1);
  sarq(dst);
  addq(dst, 0x1);
  sarq(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_d2l(dst, src);
  bind(L_exit);
}

void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
  Label done;
  cvttsd2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::cache_wb(Address line)
{
  // 64-bit CPUs always support clflush
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // prefer clwb (writeback without evict), otherwise
  // prefer clflushopt (potentially parallel writeback with evict),
  // otherwise fall back on clflush (serial writeback with evict)

  if (optimized) {
    if (no_evict) {
      clwb(line);
    } else {
      clflushopt(line);
    }
  } else {
    // no need for fence when using CLFLUSH
    clflush(line);
  }
}

void MacroAssembler::cache_wbsync(bool is_pre)
{
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // pick the correct implementation

  if (!is_pre && (optimized || no_evict)) {
    // need an sfence for post flush when using clflushopt or clwb,
    // otherwise no need for any synchronization

    sfence();
  }
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

// This is simply a call to Thread::current()
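// All caller-saved GPRs that the call could clobber are preserved around it,
// so apart from the requested "thread" register the caller's state is intact.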
void MacroAssembler::get_thread_slow(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  push(rdi);
  push(rsi);
  push(rdx);
  push(rcx);
  push(r8);
  push(r9);
  push(r10);
  push(r11);

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
  pop(rcx);
  pop(rdx);
  pop(rsi);
  pop(rdi);
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}

void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(rsp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}

// Implements fast-locking.
//
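// basic_lock: the BasicObjectLock whose cached monitor is cleared (only
//             written when UseObjectMonitorTable is enabled)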
// obj: the object to be locked
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
  Register thread = r15_thread;

  assert(reg_rax == rax, "");
  assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);

  Label push;
  const Register top = tmp;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds or we need to take the slow-path.
    movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
  }

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(tmp, obj, rscratch1);
    testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
    jcc(Assembler::notZero, slow);
  }

  // Load top.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  // Check if the lock-stack is full.
  cmpl(top, LockStack::end_offset());
  jcc(Assembler::greaterEqual, slow);

  // Check for recursion.
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::equal, push);

  // Check header for monitor (0b10).
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  movptr(tmp, reg_rax);
  andptr(tmp, ~(int32_t)markWord::unlocked_value);
  orptr(reg_rax, markWord::unlocked_value);
  // Mask inline_type bit such that we go to the slow path if object is an inline type
  andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));

  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);

  // Restore top, CAS clobbers register.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  bind(push);
  // After successful lock, push object on lock-stack.
  movptr(Address(thread, top), obj);
  incrementl(top, oopSize);
  movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}

// Implements fast-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
  Register thread = r15_thread;

  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, thread, tmp);

  Label unlocked, push_and_slow;
  const Register top = tmp;

  // Check if obj is top of lock-stack.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::notEqual, slow);

  // Pop lock-stack.
  DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
  subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);

  // Check if recursive.
  cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
  jcc(Assembler::equal, unlocked);

  // Not recursive. Check header for monitor (0b10).
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  testptr(reg_rax, markWord::unlocked_value);
  jcc(Assembler::zero, not_unlocked);
  stop("fast_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  movptr(tmp, reg_rax);
  orptr(tmp, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::equal, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
#ifdef ASSERT
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, top), obj);
#endif
  addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
  jmp(slow);

  bind(unlocked);
}

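// The 16 slots mirror the GPR encoding order (slot = 15 - encoding); slot 11,
// which would correspond to rsp, is left unused since rsp itself is not saved.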
// Saves the legacy GPR state on the stack.
void MacroAssembler::save_legacy_gprs() {
  subq(rsp, 16 * wordSize);
  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

// Restores the legacy GPR state from the stack.
void MacroAssembler::restore_legacy_gprs() {
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9, Address(rsp, 6 * wordSize));
  movq(r8, Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));
  addq(rsp, 16 * wordSize);
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_dump()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    mov64(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

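// On APX-capable CPUs esetzucc writes the full destination register with
// zero-upper semantics, so the explicit movzbl zero-extension needed after
// the legacy setb can be skipped.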
void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
  if (VM_Version::supports_apx_f()) {
    esetzucc(comparison, dst);
  } else {
    setb(comparison, dst);
    movzbl(dst, dst);
  }
}