1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/aotCodeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "compiler/compiler_globals.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "ci/ciInlineKlass.hpp"
32 #include "crc32c.h"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/collectedHeap.inline.hpp"
36 #include "gc/shared/tlab_globals.hpp"
37 #include "interpreter/bytecodeHistogram.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "interpreter/interpreterRuntime.hpp"
40 #include "jvm.h"
41 #include "memory/resourceArea.hpp"
42 #include "memory/universe.hpp"
43 #include "oops/accessDecorators.hpp"
44 #include "oops/compressedKlass.inline.hpp"
45 #include "oops/compressedOops.inline.hpp"
46 #include "oops/klass.inline.hpp"
47 #include "oops/resolvedFieldEntry.hpp"
48 #include "prims/methodHandles.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/continuation.hpp"
51 #include "runtime/interfaceSupport.inline.hpp"
52 #include "runtime/javaThread.hpp"
53 #include "runtime/jniHandles.hpp"
54 #include "runtime/objectMonitor.hpp"
55 #include "runtime/os.hpp"
56 #include "runtime/safepoint.hpp"
57 #include "runtime/safepointMechanism.hpp"
58 #include "runtime/sharedRuntime.hpp"
59 #include "runtime/signature_cc.hpp"
60 #include "runtime/stubRoutines.hpp"
61 #include "utilities/checkedCast.hpp"
62 #include "utilities/globalDefinitions.hpp"
63 #include "utilities/macros.hpp"
64 #include "vmreg_x86.inline.hpp"
65 #ifdef COMPILER2
66 #include "opto/output.hpp"
67 #endif
68
69 #ifdef PRODUCT
70 #define BLOCK_COMMENT(str) /* nothing */
71 #define STOP(error) stop(error)
72 #else
73 #define BLOCK_COMMENT(str) block_comment(str)
74 #define STOP(error) block_comment(error); stop(error)
75 #endif
76
77 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
78
79 #ifdef ASSERT
80 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
81 #endif
82
83 static const Assembler::Condition reverse[] = {
84 Assembler::noOverflow /* overflow = 0x0 */ ,
85 Assembler::overflow /* noOverflow = 0x1 */ ,
86 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
87 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
88 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
89 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
90 Assembler::above /* belowEqual = 0x6 */ ,
91 Assembler::belowEqual /* above = 0x7 */ ,
92 Assembler::positive /* negative = 0x8 */ ,
93 Assembler::negative /* positive = 0x9 */ ,
94 Assembler::noParity /* parity = 0xa */ ,
95 Assembler::parity /* noParity = 0xb */ ,
96 Assembler::greaterEqual /* less = 0xc */ ,
97 Assembler::less /* greaterEqual = 0xd */ ,
98 Assembler::greater /* lessEqual = 0xe */ ,
99 Assembler::lessEqual /* greater = 0xf, */
100
101 };
102
103
104 // Implementation of MacroAssembler
105
106 Address MacroAssembler::as_Address(AddressLiteral adr) {
107 // amd64 always does this as a pc-rel
108 // we can be absolute or disp based on the instruction type
109 // jmp/call are displacements others are absolute
110 assert(!adr.is_lval(), "must be rval");
111 assert(reachable(adr), "must be");
112 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
113
114 }
115
116 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
117 AddressLiteral base = adr.base();
118 lea(rscratch, base);
119 Address index = adr.index();
120 assert(index._disp == 0, "must not have disp"); // maybe it can?
121 Address array(rscratch, index._index, index._scale, index._disp);
122 return array;
123 }
124
125 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
126 Label L, E;
127
128 #ifdef _WIN64
129 // Windows always allocates space for it's register args
130 assert(num_args <= 4, "only register arguments supported");
131 subq(rsp, frame::arg_reg_save_area_bytes);
132 #endif
133
134 // Align stack if necessary
135 testl(rsp, 15);
136 jcc(Assembler::zero, L);
137
138 subq(rsp, 8);
139 call(RuntimeAddress(entry_point));
140 addq(rsp, 8);
141 jmp(E);
142
143 bind(L);
144 call(RuntimeAddress(entry_point));
145
146 bind(E);
147
148 #ifdef _WIN64
149 // restore stack pointer
150 addq(rsp, frame::arg_reg_save_area_bytes);
151 #endif
152 }
153
154 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
155 assert(!src2.is_lval(), "should use cmpptr");
156 assert(rscratch != noreg || always_reachable(src2), "missing");
157
158 if (reachable(src2)) {
159 cmpq(src1, as_Address(src2));
160 } else {
161 lea(rscratch, src2);
162 Assembler::cmpq(src1, Address(rscratch, 0));
163 }
164 }
165
166 int MacroAssembler::corrected_idivq(Register reg) {
167 // Full implementation of Java ldiv and lrem; checks for special
168 // case as described in JVM spec., p.243 & p.271. The function
169 // returns the (pc) offset of the idivl instruction - may be needed
170 // for implicit exceptions.
171 //
172 // normal case special case
173 //
174 // input : rax: dividend min_long
175 // reg: divisor (may not be eax/edx) -1
176 //
177 // output: rax: quotient (= rax idiv reg) min_long
178 // rdx: remainder (= rax irem reg) 0
179 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
180 static const int64_t min_long = 0x8000000000000000;
181 Label normal_case, special_case;
182
183 // check for special case
184 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
185 jcc(Assembler::notEqual, normal_case);
186 xorl(rdx, rdx); // prepare rdx for possible special case (where
187 // remainder = 0)
188 cmpq(reg, -1);
189 jcc(Assembler::equal, special_case);
190
191 // handle normal case
192 bind(normal_case);
193 cdqq();
194 int idivq_offset = offset();
195 idivq(reg);
196
197 // normal and special case exit
198 bind(special_case);
199
200 return idivq_offset;
201 }
202
203 void MacroAssembler::decrementq(Register reg, int value) {
204 if (value == min_jint) { subq(reg, value); return; }
205 if (value < 0) { incrementq(reg, -value); return; }
206 if (value == 0) { ; return; }
207 if (value == 1 && UseIncDec) { decq(reg) ; return; }
208 /* else */ { subq(reg, value) ; return; }
209 }
210
211 void MacroAssembler::decrementq(Address dst, int value) {
212 if (value == min_jint) { subq(dst, value); return; }
213 if (value < 0) { incrementq(dst, -value); return; }
214 if (value == 0) { ; return; }
215 if (value == 1 && UseIncDec) { decq(dst) ; return; }
216 /* else */ { subq(dst, value) ; return; }
217 }
218
219 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
220 assert(rscratch != noreg || always_reachable(dst), "missing");
221
222 if (reachable(dst)) {
223 incrementq(as_Address(dst));
224 } else {
225 lea(rscratch, dst);
226 incrementq(Address(rscratch, 0));
227 }
228 }
229
230 void MacroAssembler::incrementq(Register reg, int value) {
231 if (value == min_jint) { addq(reg, value); return; }
232 if (value < 0) { decrementq(reg, -value); return; }
233 if (value == 0) { ; return; }
234 if (value == 1 && UseIncDec) { incq(reg) ; return; }
235 /* else */ { addq(reg, value) ; return; }
236 }
237
238 void MacroAssembler::incrementq(Address dst, int value) {
239 if (value == min_jint) { addq(dst, value); return; }
240 if (value < 0) { decrementq(dst, -value); return; }
241 if (value == 0) { ; return; }
242 if (value == 1 && UseIncDec) { incq(dst) ; return; }
243 /* else */ { addq(dst, value) ; return; }
244 }
245
246 // 32bit can do a case table jump in one instruction but we no longer allow the base
247 // to be installed in the Address class
248 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
249 lea(rscratch, entry.base());
250 Address dispatch = entry.index();
251 assert(dispatch._base == noreg, "must be");
252 dispatch._base = rscratch;
253 jmp(dispatch);
254 }
255
256 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
257 ShouldNotReachHere(); // 64bit doesn't use two regs
258 cmpq(x_lo, y_lo);
259 }
260
261 void MacroAssembler::lea(Register dst, AddressLiteral src) {
262 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
263 }
264
265 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
266 lea(rscratch, adr);
267 movptr(dst, rscratch);
268 }
269
270 void MacroAssembler::leave() {
271 // %%% is this really better? Why not on 32bit too?
272 emit_int8((unsigned char)0xC9); // LEAVE
273 }
274
275 void MacroAssembler::lneg(Register hi, Register lo) {
276 ShouldNotReachHere(); // 64bit doesn't use two regs
277 negq(lo);
278 }
279
280 void MacroAssembler::movoop(Register dst, jobject obj) {
281 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
282 }
283
284 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
285 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
286 movq(dst, rscratch);
287 }
288
289 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
290 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
291 }
292
293 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
294 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
295 movq(dst, rscratch);
296 }
297
298 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
299 if (src.is_lval()) {
300 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
301 } else {
302 if (reachable(src)) {
303 movq(dst, as_Address(src));
304 } else {
305 lea(dst, src);
306 movq(dst, Address(dst, 0));
307 }
308 }
309 }
310
311 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
312 movq(as_Address(dst, rscratch), src);
313 }
314
315 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
316 movq(dst, as_Address(src, dst /*rscratch*/));
317 }
318
319 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
320 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
321 if (is_simm32(src)) {
322 movptr(dst, checked_cast<int32_t>(src));
323 } else {
324 mov64(rscratch, src);
325 movq(dst, rscratch);
326 }
327 }
328
329 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
330 movoop(rscratch, obj);
331 push(rscratch);
332 }
333
334 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
335 mov_metadata(rscratch, obj);
336 push(rscratch);
337 }
338
339 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
340 lea(rscratch, src);
341 if (src.is_lval()) {
342 push(rscratch);
343 } else {
344 pushq(Address(rscratch, 0));
345 }
346 }
347
348 static void pass_arg0(MacroAssembler* masm, Register arg) {
349 if (c_rarg0 != arg ) {
350 masm->mov(c_rarg0, arg);
351 }
352 }
353
354 static void pass_arg1(MacroAssembler* masm, Register arg) {
355 if (c_rarg1 != arg ) {
356 masm->mov(c_rarg1, arg);
357 }
358 }
359
360 static void pass_arg2(MacroAssembler* masm, Register arg) {
361 if (c_rarg2 != arg ) {
362 masm->mov(c_rarg2, arg);
363 }
364 }
365
366 static void pass_arg3(MacroAssembler* masm, Register arg) {
367 if (c_rarg3 != arg ) {
368 masm->mov(c_rarg3, arg);
369 }
370 }
371
372 void MacroAssembler::stop(const char* msg) {
373 if (ShowMessageBoxOnError) {
374 address rip = pc();
375 pusha(); // get regs on stack
376 lea(c_rarg1, InternalAddress(rip));
377 movq(c_rarg2, rsp); // pass pointer to regs array
378 }
379 // Skip AOT caching C strings in scratch buffer.
380 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
381 lea(c_rarg0, ExternalAddress((address) str));
382 andq(rsp, -16); // align stack as required by ABI
383 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
384 hlt();
385 }
386
387 void MacroAssembler::warn(const char* msg) {
388 push(rbp);
389 movq(rbp, rsp);
390 andq(rsp, -16); // align stack as required by push_CPU_state and call
391 push_CPU_state(); // keeps alignment at 16 bytes
392
393 #ifdef _WIN64
394 // Windows always allocates space for its register args
395 subq(rsp, frame::arg_reg_save_area_bytes);
396 #endif
397 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
398 lea(c_rarg0, ExternalAddress((address) str));
399 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
400
401 #ifdef _WIN64
402 // restore stack pointer
403 addq(rsp, frame::arg_reg_save_area_bytes);
404 #endif
405 pop_CPU_state();
406 mov(rsp, rbp);
407 pop(rbp);
408 }
409
410 void MacroAssembler::print_state() {
411 address rip = pc();
412 pusha(); // get regs on stack
413 push(rbp);
414 movq(rbp, rsp);
415 andq(rsp, -16); // align stack as required by push_CPU_state and call
416 push_CPU_state(); // keeps alignment at 16 bytes
417
418 lea(c_rarg0, InternalAddress(rip));
419 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
420 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
421
422 pop_CPU_state();
423 mov(rsp, rbp);
424 pop(rbp);
425 popa();
426 }
427
428 #ifndef PRODUCT
429 extern "C" void findpc(intptr_t x);
430 #endif
431
432 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
433 // In order to get locks to work, we need to fake a in_VM state
434 if (ShowMessageBoxOnError) {
435 JavaThread* thread = JavaThread::current();
436 JavaThreadState saved_state = thread->thread_state();
437 thread->set_thread_state(_thread_in_vm);
438 #ifndef PRODUCT
439 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
440 ttyLocker ttyl;
441 BytecodeCounter::print();
442 }
443 #endif
444 // To see where a verify_oop failed, get $ebx+40/X for this frame.
445 // XXX correct this offset for amd64
446 // This is the value of eip which points to where verify_oop will return.
447 if (os::message_box(msg, "Execution stopped, print registers?")) {
448 print_state64(pc, regs);
449 BREAKPOINT;
450 }
451 }
452 fatal("DEBUG MESSAGE: %s", msg);
453 }
454
455 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
456 ttyLocker ttyl;
457 DebuggingContext debugging{};
458 tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
459 #ifndef PRODUCT
460 tty->cr();
461 findpc(pc);
462 tty->cr();
463 #endif
464 #define PRINT_REG(rax, value) \
465 { tty->print("%s = ", #rax); os::print_location(tty, value); }
466 PRINT_REG(rax, regs[15]);
467 PRINT_REG(rbx, regs[12]);
468 PRINT_REG(rcx, regs[14]);
469 PRINT_REG(rdx, regs[13]);
470 PRINT_REG(rdi, regs[8]);
471 PRINT_REG(rsi, regs[9]);
472 PRINT_REG(rbp, regs[10]);
473 // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
474 PRINT_REG(rsp, (intptr_t)(®s[16]));
475 PRINT_REG(r8 , regs[7]);
476 PRINT_REG(r9 , regs[6]);
477 PRINT_REG(r10, regs[5]);
478 PRINT_REG(r11, regs[4]);
479 PRINT_REG(r12, regs[3]);
480 PRINT_REG(r13, regs[2]);
481 PRINT_REG(r14, regs[1]);
482 PRINT_REG(r15, regs[0]);
483 #undef PRINT_REG
484 // Print some words near the top of the stack.
485 int64_t* rsp = ®s[16];
486 int64_t* dump_sp = rsp;
487 for (int col1 = 0; col1 < 8; col1++) {
488 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
489 os::print_location(tty, *dump_sp++);
490 }
491 for (int row = 0; row < 25; row++) {
492 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
493 for (int col = 0; col < 4; col++) {
494 tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
495 }
496 tty->cr();
497 }
498 // Print some instructions around pc:
499 Disassembler::decode((address)pc-64, (address)pc);
500 tty->print_cr("--------");
501 Disassembler::decode((address)pc, (address)pc+32);
502 }
503
504 // The java_calling_convention describes stack locations as ideal slots on
505 // a frame with no abi restrictions. Since we must observe abi restrictions
506 // (like the placement of the register window) the slots must be biased by
507 // the following value.
508 static int reg2offset_in(VMReg r) {
509 // Account for saved rbp and return address
510 // This should really be in_preserve_stack_slots
511 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
512 }
513
514 static int reg2offset_out(VMReg r) {
515 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
516 }
517
518 // A long move
519 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
520
521 // The calling conventions assures us that each VMregpair is either
522 // all really one physical register or adjacent stack slots.
523
524 if (src.is_single_phys_reg() ) {
525 if (dst.is_single_phys_reg()) {
526 if (dst.first() != src.first()) {
527 mov(dst.first()->as_Register(), src.first()->as_Register());
528 }
529 } else {
530 assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
531 src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
532 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
533 }
534 } else if (dst.is_single_phys_reg()) {
535 assert(src.is_single_reg(), "not a stack pair");
536 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
537 } else {
538 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
539 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
540 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
541 }
542 }
543
544 // A double move
545 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
546
547 // The calling conventions assures us that each VMregpair is either
548 // all really one physical register or adjacent stack slots.
549
550 if (src.is_single_phys_reg() ) {
551 if (dst.is_single_phys_reg()) {
552 // In theory these overlap but the ordering is such that this is likely a nop
553 if ( src.first() != dst.first()) {
554 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
555 }
556 } else {
557 assert(dst.is_single_reg(), "not a stack pair");
558 movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
559 }
560 } else if (dst.is_single_phys_reg()) {
561 assert(src.is_single_reg(), "not a stack pair");
562 movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
563 } else {
564 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
565 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
566 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
567 }
568 }
569
570
571 // A float arg may have to do float reg int reg conversion
572 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
573 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
574
575 // The calling conventions assures us that each VMregpair is either
576 // all really one physical register or adjacent stack slots.
577
578 if (src.first()->is_stack()) {
579 if (dst.first()->is_stack()) {
580 movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
581 movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
582 } else {
583 // stack to reg
584 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
585 movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
586 }
587 } else if (dst.first()->is_stack()) {
588 // reg to stack
589 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
590 movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
591 } else {
592 // reg to reg
593 // In theory these overlap but the ordering is such that this is likely a nop
594 if ( src.first() != dst.first()) {
595 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
596 }
597 }
598 }
599
600 // On 64 bit we will store integer like items to the stack as
601 // 64 bits items (x86_32/64 abi) even though java would only store
602 // 32bits for a parameter. On 32bit it will simply be 32 bits
603 // So this routine will do 32->32 on 32bit and 32->64 on 64bit
604 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
605 if (src.first()->is_stack()) {
606 if (dst.first()->is_stack()) {
607 // stack to stack
608 movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
609 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
610 } else {
611 // stack to reg
612 movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
613 }
614 } else if (dst.first()->is_stack()) {
615 // reg to stack
616 // Do we really have to sign extend???
617 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
618 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
619 } else {
620 // Do we really have to sign extend???
621 // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
622 if (dst.first() != src.first()) {
623 movq(dst.first()->as_Register(), src.first()->as_Register());
624 }
625 }
626 }
627
628 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
629 if (src.first()->is_stack()) {
630 if (dst.first()->is_stack()) {
631 // stack to stack
632 movq(rax, Address(rbp, reg2offset_in(src.first())));
633 movq(Address(rsp, reg2offset_out(dst.first())), rax);
634 } else {
635 // stack to reg
636 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
637 }
638 } else if (dst.first()->is_stack()) {
639 // reg to stack
640 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
641 } else {
642 if (dst.first() != src.first()) {
643 movq(dst.first()->as_Register(), src.first()->as_Register());
644 }
645 }
646 }
647
648 // An oop arg. Must pass a handle not the oop itself
649 void MacroAssembler::object_move(OopMap* map,
650 int oop_handle_offset,
651 int framesize_in_slots,
652 VMRegPair src,
653 VMRegPair dst,
654 bool is_receiver,
655 int* receiver_offset) {
656
657 // must pass a handle. First figure out the location we use as a handle
658
659 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
660
661 // See if oop is null if it is we need no handle
662
663 if (src.first()->is_stack()) {
664
665 // Oop is already on the stack as an argument
666 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
667 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
668 if (is_receiver) {
669 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
670 }
671
672 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
673 lea(rHandle, Address(rbp, reg2offset_in(src.first())));
674 // conditionally move a null
675 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
676 } else {
677
678 // Oop is in a register we must store it to the space we reserve
679 // on the stack for oop_handles and pass a handle if oop is non-null
680
681 const Register rOop = src.first()->as_Register();
682 int oop_slot;
683 if (rOop == j_rarg0)
684 oop_slot = 0;
685 else if (rOop == j_rarg1)
686 oop_slot = 1;
687 else if (rOop == j_rarg2)
688 oop_slot = 2;
689 else if (rOop == j_rarg3)
690 oop_slot = 3;
691 else if (rOop == j_rarg4)
692 oop_slot = 4;
693 else {
694 assert(rOop == j_rarg5, "wrong register");
695 oop_slot = 5;
696 }
697
698 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
699 int offset = oop_slot*VMRegImpl::stack_slot_size;
700
701 map->set_oop(VMRegImpl::stack2reg(oop_slot));
702 // Store oop in handle area, may be null
703 movptr(Address(rsp, offset), rOop);
704 if (is_receiver) {
705 *receiver_offset = offset;
706 }
707
708 cmpptr(rOop, NULL_WORD);
709 lea(rHandle, Address(rsp, offset));
710 // conditionally move a null from the handle area where it was just stored
711 cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
712 }
713
714 // If arg is on the stack then place it otherwise it is already in correct reg.
715 if (dst.first()->is_stack()) {
716 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
717 }
718 }
719
720 void MacroAssembler::addptr(Register dst, int32_t imm32) {
721 addq(dst, imm32);
722 }
723
724 void MacroAssembler::addptr(Register dst, Register src) {
725 addq(dst, src);
726 }
727
728 void MacroAssembler::addptr(Address dst, Register src) {
729 addq(dst, src);
730 }
731
732 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
733 assert(rscratch != noreg || always_reachable(src), "missing");
734
735 if (reachable(src)) {
736 Assembler::addsd(dst, as_Address(src));
737 } else {
738 lea(rscratch, src);
739 Assembler::addsd(dst, Address(rscratch, 0));
740 }
741 }
742
743 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
744 assert(rscratch != noreg || always_reachable(src), "missing");
745
746 if (reachable(src)) {
747 addss(dst, as_Address(src));
748 } else {
749 lea(rscratch, src);
750 addss(dst, Address(rscratch, 0));
751 }
752 }
753
754 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
755 assert(rscratch != noreg || always_reachable(src), "missing");
756
757 if (reachable(src)) {
758 Assembler::addpd(dst, as_Address(src));
759 } else {
760 lea(rscratch, src);
761 Assembler::addpd(dst, Address(rscratch, 0));
762 }
763 }
764
765 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
766 // Stub code is generated once and never copied.
767 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
768 void MacroAssembler::align64() {
769 align(64, (uint)(uintptr_t)pc());
770 }
771
772 void MacroAssembler::align32() {
773 align(32, (uint)(uintptr_t)pc());
774 }
775
776 void MacroAssembler::align(uint modulus) {
777 // 8273459: Ensure alignment is possible with current segment alignment
778 assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
779 align(modulus, offset());
780 }
781
782 void MacroAssembler::align(uint modulus, uint target) {
783 if (target % modulus != 0) {
784 nop(modulus - (target % modulus));
785 }
786 }
787
788 void MacroAssembler::push_f(XMMRegister r) {
789 subptr(rsp, wordSize);
790 movflt(Address(rsp, 0), r);
791 }
792
793 void MacroAssembler::pop_f(XMMRegister r) {
794 movflt(r, Address(rsp, 0));
795 addptr(rsp, wordSize);
796 }
797
798 void MacroAssembler::push_d(XMMRegister r) {
799 subptr(rsp, 2 * wordSize);
800 movdbl(Address(rsp, 0), r);
801 }
802
803 void MacroAssembler::pop_d(XMMRegister r) {
804 movdbl(r, Address(rsp, 0));
805 addptr(rsp, 2 * Interpreter::stackElementSize);
806 }
807
808 void MacroAssembler::push_ppx(Register src) {
809 if (VM_Version::supports_apx_f()) {
810 pushp(src);
811 } else {
812 Assembler::push(src);
813 }
814 }
815
816 void MacroAssembler::pop_ppx(Register dst) {
817 if (VM_Version::supports_apx_f()) {
818 popp(dst);
819 } else {
820 Assembler::pop(dst);
821 }
822 }
823
824 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
825 // Used in sign-masking with aligned address.
826 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
827 assert(rscratch != noreg || always_reachable(src), "missing");
828
829 if (UseAVX > 2 &&
830 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
831 (dst->encoding() >= 16)) {
832 vpand(dst, dst, src, AVX_512bit, rscratch);
833 } else if (reachable(src)) {
834 Assembler::andpd(dst, as_Address(src));
835 } else {
836 lea(rscratch, src);
837 Assembler::andpd(dst, Address(rscratch, 0));
838 }
839 }
840
841 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
842 // Used in sign-masking with aligned address.
843 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
844 assert(rscratch != noreg || always_reachable(src), "missing");
845
846 if (reachable(src)) {
847 Assembler::andps(dst, as_Address(src));
848 } else {
849 lea(rscratch, src);
850 Assembler::andps(dst, Address(rscratch, 0));
851 }
852 }
853
854 void MacroAssembler::andptr(Register dst, int32_t imm32) {
855 andq(dst, imm32);
856 }
857
858 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
859 assert(rscratch != noreg || always_reachable(src), "missing");
860
861 if (reachable(src)) {
862 andq(dst, as_Address(src));
863 } else {
864 lea(rscratch, src);
865 andq(dst, Address(rscratch, 0));
866 }
867 }
868
869 void MacroAssembler::atomic_incl(Address counter_addr) {
870 lock();
871 incrementl(counter_addr);
872 }
873
874 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
875 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
876
877 if (reachable(counter_addr)) {
878 atomic_incl(as_Address(counter_addr));
879 } else {
880 lea(rscratch, counter_addr);
881 atomic_incl(Address(rscratch, 0));
882 }
883 }
884
885 void MacroAssembler::atomic_incq(Address counter_addr) {
886 lock();
887 incrementq(counter_addr);
888 }
889
890 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
891 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
892
893 if (reachable(counter_addr)) {
894 atomic_incq(as_Address(counter_addr));
895 } else {
896 lea(rscratch, counter_addr);
897 atomic_incq(Address(rscratch, 0));
898 }
899 }
900
901 // Writes to stack successive pages until offset reached to check for
902 // stack overflow + shadow pages. This clobbers tmp.
903 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
904 movptr(tmp, rsp);
905 // Bang stack for total size given plus shadow page size.
906 // Bang one page at a time because large size can bang beyond yellow and
907 // red zones.
908 Label loop;
909 bind(loop);
910 movl(Address(tmp, (-(int)os::vm_page_size())), size );
911 subptr(tmp, (int)os::vm_page_size());
912 subl(size, (int)os::vm_page_size());
913 jcc(Assembler::greater, loop);
914
915 // Bang down shadow pages too.
916 // At this point, (tmp-0) is the last address touched, so don't
917 // touch it again. (It was touched as (tmp-pagesize) but then tmp
918 // was post-decremented.) Skip this address by starting at i=1, and
919 // touch a few more pages below. N.B. It is important to touch all
920 // the way down including all pages in the shadow zone.
921 for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
922 // this could be any sized move but this is can be a debugging crumb
923 // so the bigger the better.
924 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
925 }
926 }
927
928 void MacroAssembler::reserved_stack_check() {
929 // testing if reserved zone needs to be enabled
930 Label no_reserved_zone_enabling;
931
932 cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
933 jcc(Assembler::below, no_reserved_zone_enabling);
934
935 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
936 jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
937 should_not_reach_here();
938
939 bind(no_reserved_zone_enabling);
940 }
941
942 void MacroAssembler::c2bool(Register x) {
943 // implements x == 0 ? 0 : 1
944 // note: must only look at least-significant byte of x
945 // since C-style booleans are stored in one byte
946 // only! (was bug)
947 andl(x, 0xFF);
948 setb(Assembler::notZero, x);
949 }
950
951 // Wouldn't need if AddressLiteral version had new name
952 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
953 Assembler::call(L, rtype);
954 }
955
956 void MacroAssembler::call(Register entry) {
957 Assembler::call(entry);
958 }
959
960 void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
961 assert(rscratch != noreg || always_reachable(entry), "missing");
962
963 if (reachable(entry)) {
964 Assembler::call_literal(entry.target(), entry.rspec());
965 } else {
966 lea(rscratch, entry);
967 Assembler::call(rscratch);
968 }
969 }
970
971 void MacroAssembler::ic_call(address entry, jint method_index) {
972 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
973 // Needs full 64-bit immediate for later patching.
974 Assembler::mov64(rax, (int64_t)Universe::non_oop_word());
975 call(AddressLiteral(entry, rh));
976 }
977
978 int MacroAssembler::ic_check_size() {
979 return UseCompactObjectHeaders ? 17 : 14;
980 }
981
982 int MacroAssembler::ic_check(int end_alignment) {
983 Register receiver = j_rarg0;
984 Register data = rax;
985 Register temp = rscratch1;
986
987 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
988 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
989 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
990 // before the inline cache check here, and not after
991 align(end_alignment, offset() + ic_check_size());
992
993 int uep_offset = offset();
994
995 if (UseCompactObjectHeaders) {
996 load_narrow_klass_compact(temp, receiver);
997 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
998 } else {
999 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1000 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
1001 }
1002
1003 // if inline cache check fails, then jump to runtime routine
1004 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1005 assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
1006
1007 return uep_offset;
1008 }
1009
1010 void MacroAssembler::emit_static_call_stub() {
1011 // Static stub relocation also tags the Method* in the code-stream.
1012 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
1013 // This is recognized as unresolved by relocs/nativeinst/ic code.
1014 jump(RuntimeAddress(pc()));
1015 }
1016
1017 // Implementation of call_VM versions
1018
1019 void MacroAssembler::call_VM(Register oop_result,
1020 address entry_point,
1021 bool check_exceptions) {
1022 Label C, E;
1023 call(C, relocInfo::none);
1024 jmp(E);
1025
1026 bind(C);
1027 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1028 ret(0);
1029
1030 bind(E);
1031 }
1032
1033 void MacroAssembler::call_VM(Register oop_result,
1034 address entry_point,
1035 Register arg_1,
1036 bool check_exceptions) {
1037 Label C, E;
1038 call(C, relocInfo::none);
1039 jmp(E);
1040
1041 bind(C);
1042 pass_arg1(this, arg_1);
1043 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1044 ret(0);
1045
1046 bind(E);
1047 }
1048
1049 void MacroAssembler::call_VM(Register oop_result,
1050 address entry_point,
1051 Register arg_1,
1052 Register arg_2,
1053 bool check_exceptions) {
1054 Label C, E;
1055 call(C, relocInfo::none);
1056 jmp(E);
1057
1058 bind(C);
1059
1060 assert_different_registers(arg_1, c_rarg2);
1061
1062 pass_arg2(this, arg_2);
1063 pass_arg1(this, arg_1);
1064 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1065 ret(0);
1066
1067 bind(E);
1068 }
1069
1070 void MacroAssembler::call_VM(Register oop_result,
1071 address entry_point,
1072 Register arg_1,
1073 Register arg_2,
1074 Register arg_3,
1075 bool check_exceptions) {
1076 Label C, E;
1077 call(C, relocInfo::none);
1078 jmp(E);
1079
1080 bind(C);
1081
1082 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1083 assert_different_registers(arg_2, c_rarg3);
1084 pass_arg3(this, arg_3);
1085 pass_arg2(this, arg_2);
1086 pass_arg1(this, arg_1);
1087 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1088 ret(0);
1089
1090 bind(E);
1091 }
1092
1093 void MacroAssembler::call_VM(Register oop_result,
1094 Register last_java_sp,
1095 address entry_point,
1096 int number_of_arguments,
1097 bool check_exceptions) {
1098 call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1099 }
1100
1101 void MacroAssembler::call_VM(Register oop_result,
1102 Register last_java_sp,
1103 address entry_point,
1104 Register arg_1,
1105 bool check_exceptions) {
1106 pass_arg1(this, arg_1);
1107 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1108 }
1109
1110 void MacroAssembler::call_VM(Register oop_result,
1111 Register last_java_sp,
1112 address entry_point,
1113 Register arg_1,
1114 Register arg_2,
1115 bool check_exceptions) {
1116
1117 assert_different_registers(arg_1, c_rarg2);
1118 pass_arg2(this, arg_2);
1119 pass_arg1(this, arg_1);
1120 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1121 }
1122
1123 void MacroAssembler::call_VM(Register oop_result,
1124 Register last_java_sp,
1125 address entry_point,
1126 Register arg_1,
1127 Register arg_2,
1128 Register arg_3,
1129 bool check_exceptions) {
1130 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1131 assert_different_registers(arg_2, c_rarg3);
1132 pass_arg3(this, arg_3);
1133 pass_arg2(this, arg_2);
1134 pass_arg1(this, arg_1);
1135 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1136 }
1137
1138 void MacroAssembler::super_call_VM(Register oop_result,
1139 Register last_java_sp,
1140 address entry_point,
1141 int number_of_arguments,
1142 bool check_exceptions) {
1143 MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1144 }
1145
1146 void MacroAssembler::super_call_VM(Register oop_result,
1147 Register last_java_sp,
1148 address entry_point,
1149 Register arg_1,
1150 bool check_exceptions) {
1151 pass_arg1(this, arg_1);
1152 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1153 }
1154
1155 void MacroAssembler::super_call_VM(Register oop_result,
1156 Register last_java_sp,
1157 address entry_point,
1158 Register arg_1,
1159 Register arg_2,
1160 bool check_exceptions) {
1161
1162 assert_different_registers(arg_1, c_rarg2);
1163 pass_arg2(this, arg_2);
1164 pass_arg1(this, arg_1);
1165 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1166 }
1167
1168 void MacroAssembler::super_call_VM(Register oop_result,
1169 Register last_java_sp,
1170 address entry_point,
1171 Register arg_1,
1172 Register arg_2,
1173 Register arg_3,
1174 bool check_exceptions) {
1175 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1176 assert_different_registers(arg_2, c_rarg3);
1177 pass_arg3(this, arg_3);
1178 pass_arg2(this, arg_2);
1179 pass_arg1(this, arg_1);
1180 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1181 }
1182
1183 void MacroAssembler::call_VM_base(Register oop_result,
1184 Register last_java_sp,
1185 address entry_point,
1186 int number_of_arguments,
1187 bool check_exceptions) {
1188 Register java_thread = r15_thread;
1189
1190 // determine last_java_sp register
1191 if (!last_java_sp->is_valid()) {
1192 last_java_sp = rsp;
1193 }
1194 // debugging support
1195 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1196 #ifdef ASSERT
1197 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
1198 // r12 is the heapbase.
1199 if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
1200 #endif // ASSERT
1201
1202 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
1203 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
1204
1205 // push java thread (becomes first argument of C function)
1206
1207 mov(c_rarg0, r15_thread);
1208
1209 // set last Java frame before call
1210 assert(last_java_sp != rbp, "can't use ebp/rbp");
1211
1212 // Only interpreter should have to set fp
1213 set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);
1214
1215 // do the call, remove parameters
1216 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
1217
1218 #ifdef ASSERT
1219 // Check that thread register is not clobbered.
1220 guarantee(java_thread != rax, "change this code");
1221 push(rax);
1222 { Label L;
1223 get_thread_slow(rax);
1224 cmpptr(java_thread, rax);
1225 jcc(Assembler::equal, L);
1226 STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
1227 bind(L);
1228 }
1229 pop(rax);
1230 #endif
1231
1232 // reset last Java frame
1233 // Only interpreter should have to clear fp
1234 reset_last_Java_frame(true);
1235
1236 // C++ interp handles this in the interpreter
1237 check_and_handle_popframe();
1238 check_and_handle_earlyret();
1239
1240 if (check_exceptions) {
1241 // check for pending exceptions (java_thread is set upon return)
1242 cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1243 // This used to conditionally jump to forward_exception however it is
1244 // possible if we relocate that the branch will not reach. So we must jump
1245 // around so we can always reach
1246
1247 Label ok;
1248 jcc(Assembler::equal, ok);
1249 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1250 bind(ok);
1251 }
1252
1253 // get oop result if there is one and reset the value in the thread
1254 if (oop_result->is_valid()) {
1255 get_vm_result_oop(oop_result);
1256 }
1257 }
1258
1259 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
1260 // Calculate the value for last_Java_sp somewhat subtle.
1261 // call_VM does an intermediate call which places a return address on
1262 // the stack just under the stack pointer as the user finished with it.
1263 // This allows use to retrieve last_Java_pc from last_Java_sp[-1].
1264
1265 // We've pushed one address, correct last_Java_sp
1266 lea(rax, Address(rsp, wordSize));
1267
1268 call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
1269 }
1270
1271 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
1272 void MacroAssembler::call_VM_leaf0(address entry_point) {
1273 MacroAssembler::call_VM_leaf_base(entry_point, 0);
1274 }
1275
1276 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1277 call_VM_leaf_base(entry_point, number_of_arguments);
1278 }
1279
1280 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1281 pass_arg0(this, arg_0);
1282 call_VM_leaf(entry_point, 1);
1283 }
1284
1285 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1286
1287 assert_different_registers(arg_0, c_rarg1);
1288 pass_arg1(this, arg_1);
1289 pass_arg0(this, arg_0);
1290 call_VM_leaf(entry_point, 2);
1291 }
1292
1293 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1294 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1295 assert_different_registers(arg_1, c_rarg2);
1296 pass_arg2(this, arg_2);
1297 pass_arg1(this, arg_1);
1298 pass_arg0(this, arg_0);
1299 call_VM_leaf(entry_point, 3);
1300 }
1301
1302 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1303 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
1304 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1305 assert_different_registers(arg_2, c_rarg3);
1306 pass_arg3(this, arg_3);
1307 pass_arg2(this, arg_2);
1308 pass_arg1(this, arg_1);
1309 pass_arg0(this, arg_0);
1310 call_VM_leaf(entry_point, 3);
1311 }
1312
1313 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1314 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1315 }
1316
1317 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1318 pass_arg0(this, arg_0);
1319 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1320 }
1321
1322 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1323 assert_different_registers(arg_0, c_rarg1);
1324 pass_arg1(this, arg_1);
1325 pass_arg0(this, arg_0);
1326 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1327 }
1328
1329 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1330 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1331 assert_different_registers(arg_1, c_rarg2);
1332 pass_arg2(this, arg_2);
1333 pass_arg1(this, arg_1);
1334 pass_arg0(this, arg_0);
1335 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1336 }
1337
1338 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1339 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
1340 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1341 assert_different_registers(arg_2, c_rarg3);
1342 pass_arg3(this, arg_3);
1343 pass_arg2(this, arg_2);
1344 pass_arg1(this, arg_1);
1345 pass_arg0(this, arg_0);
1346 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1347 }
1348
1349 void MacroAssembler::get_vm_result_oop(Register oop_result) {
1350 movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
1351 movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
1352 verify_oop_msg(oop_result, "broken oop in call_VM_base");
1353 }
1354
1355 void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
1356 movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
1357 movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
1358 }
1359
1360 void MacroAssembler::check_and_handle_earlyret() {
1361 }
1362
1363 void MacroAssembler::check_and_handle_popframe() {
1364 }
1365
1366 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
1367 assert(rscratch != noreg || always_reachable(src1), "missing");
1368
1369 if (reachable(src1)) {
1370 cmpl(as_Address(src1), imm);
1371 } else {
1372 lea(rscratch, src1);
1373 cmpl(Address(rscratch, 0), imm);
1374 }
1375 }
1376
1377 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
1378 assert(!src2.is_lval(), "use cmpptr");
1379 assert(rscratch != noreg || always_reachable(src2), "missing");
1380
1381 if (reachable(src2)) {
1382 cmpl(src1, as_Address(src2));
1383 } else {
1384 lea(rscratch, src2);
1385 cmpl(src1, Address(rscratch, 0));
1386 }
1387 }
1388
1389 void MacroAssembler::cmp32(Register src1, int32_t imm) {
1390 Assembler::cmpl(src1, imm);
1391 }
1392
1393 void MacroAssembler::cmp32(Register src1, Address src2) {
1394 Assembler::cmpl(src1, src2);
1395 }
1396
1397 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1398 ucomisd(opr1, opr2);
1399
1400 Label L;
1401 if (unordered_is_less) {
1402 movl(dst, -1);
1403 jcc(Assembler::parity, L);
1404 jcc(Assembler::below , L);
1405 movl(dst, 0);
1406 jcc(Assembler::equal , L);
1407 increment(dst);
1408 } else { // unordered is greater
1409 movl(dst, 1);
1410 jcc(Assembler::parity, L);
1411 jcc(Assembler::above , L);
1412 movl(dst, 0);
1413 jcc(Assembler::equal , L);
1414 decrementl(dst);
1415 }
1416 bind(L);
1417 }
1418
1419 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1420 ucomiss(opr1, opr2);
1421
1422 Label L;
1423 if (unordered_is_less) {
1424 movl(dst, -1);
1425 jcc(Assembler::parity, L);
1426 jcc(Assembler::below , L);
1427 movl(dst, 0);
1428 jcc(Assembler::equal , L);
1429 increment(dst);
1430 } else { // unordered is greater
1431 movl(dst, 1);
1432 jcc(Assembler::parity, L);
1433 jcc(Assembler::above , L);
1434 movl(dst, 0);
1435 jcc(Assembler::equal , L);
1436 decrementl(dst);
1437 }
1438 bind(L);
1439 }
1440
1441
1442 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
1443 assert(rscratch != noreg || always_reachable(src1), "missing");
1444
1445 if (reachable(src1)) {
1446 cmpb(as_Address(src1), imm);
1447 } else {
1448 lea(rscratch, src1);
1449 cmpb(Address(rscratch, 0), imm);
1450 }
1451 }
1452
1453 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
1454 assert(rscratch != noreg || always_reachable(src2), "missing");
1455
1456 if (src2.is_lval()) {
1457 movptr(rscratch, src2);
1458 Assembler::cmpq(src1, rscratch);
1459 } else if (reachable(src2)) {
1460 cmpq(src1, as_Address(src2));
1461 } else {
1462 lea(rscratch, src2);
1463 Assembler::cmpq(src1, Address(rscratch, 0));
1464 }
1465 }
1466
1467 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
1468 assert(src2.is_lval(), "not a mem-mem compare");
1469 // moves src2's literal address
1470 movptr(rscratch, src2);
1471 Assembler::cmpq(src1, rscratch);
1472 }
1473
1474 void MacroAssembler::cmpoop(Register src1, Register src2) {
1475 cmpptr(src1, src2);
1476 }
1477
1478 void MacroAssembler::cmpoop(Register src1, Address src2) {
1479 cmpptr(src1, src2);
1480 }
1481
1482 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
1483 movoop(rscratch, src2);
1484 cmpptr(src1, rscratch);
1485 }
1486
1487 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
1488 assert(rscratch != noreg || always_reachable(adr), "missing");
1489
1490 if (reachable(adr)) {
1491 lock();
1492 cmpxchgptr(reg, as_Address(adr));
1493 } else {
1494 lea(rscratch, adr);
1495 lock();
1496 cmpxchgptr(reg, Address(rscratch, 0));
1497 }
1498 }
1499
1500 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
1501 cmpxchgq(reg, adr);
1502 }
1503
1504 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1505 assert(rscratch != noreg || always_reachable(src), "missing");
1506
1507 if (reachable(src)) {
1508 Assembler::comisd(dst, as_Address(src));
1509 } else {
1510 lea(rscratch, src);
1511 Assembler::comisd(dst, Address(rscratch, 0));
1512 }
1513 }
1514
1515 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
1516 assert(rscratch != noreg || always_reachable(src), "missing");
1517
1518 if (reachable(src)) {
1519 Assembler::comiss(dst, as_Address(src));
1520 } else {
1521 lea(rscratch, src);
1522 Assembler::comiss(dst, Address(rscratch, 0));
1523 }
1524 }
1525
1526
1527 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
1528 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
1529
1530 Condition negated_cond = negate_condition(cond);
1531 Label L;
1532 jcc(negated_cond, L);
1533 pushf(); // Preserve flags
1534 atomic_incl(counter_addr, rscratch);
1535 popf();
1536 bind(L);
1537 }
1538
1539 int MacroAssembler::corrected_idivl(Register reg) {
1540 // Full implementation of Java idiv and irem; checks for
1541 // special case as described in JVM spec., p.243 & p.271.
1542 // The function returns the (pc) offset of the idivl
1543 // instruction - may be needed for implicit exceptions.
1544 //
1545 // normal case special case
1546 //
1547 // input : rax,: dividend min_int
1548 // reg: divisor (may not be rax,/rdx) -1
1549 //
1550 // output: rax,: quotient (= rax, idiv reg) min_int
1551 // rdx: remainder (= rax, irem reg) 0
1552 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
1553 const int min_int = 0x80000000;
1554 Label normal_case, special_case;
1555
1556 // check for special case
1557 cmpl(rax, min_int);
1558 jcc(Assembler::notEqual, normal_case);
1559 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
1560 cmpl(reg, -1);
1561 jcc(Assembler::equal, special_case);
1562
1563 // handle normal case
1564 bind(normal_case);
1565 cdql();
1566 int idivl_offset = offset();
1567 idivl(reg);
1568
1569 // normal and special case exit
1570 bind(special_case);
1571
1572 return idivl_offset;
1573 }
1574
1575
1576
1577 void MacroAssembler::decrementl(Register reg, int value) {
1578 if (value == min_jint) {subl(reg, value) ; return; }
1579 if (value < 0) { incrementl(reg, -value); return; }
1580 if (value == 0) { ; return; }
1581 if (value == 1 && UseIncDec) { decl(reg) ; return; }
1582 /* else */ { subl(reg, value) ; return; }
1583 }
1584
1585 void MacroAssembler::decrementl(Address dst, int value) {
1586 if (value == min_jint) {subl(dst, value) ; return; }
1587 if (value < 0) { incrementl(dst, -value); return; }
1588 if (value == 0) { ; return; }
1589 if (value == 1 && UseIncDec) { decl(dst) ; return; }
1590 /* else */ { subl(dst, value) ; return; }
1591 }
1592
1593 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
1594 assert(shift_value > 0, "illegal shift value");
1595 Label _is_positive;
1596 testl (reg, reg);
1597 jcc (Assembler::positive, _is_positive);
1598 int offset = (1 << shift_value) - 1 ;
1599
1600 if (offset == 1) {
1601 incrementl(reg);
1602 } else {
1603 addl(reg, offset);
1604 }
1605
1606 bind (_is_positive);
1607 sarl(reg, shift_value);
1608 }
1609
1610 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1611 assert(rscratch != noreg || always_reachable(src), "missing");
1612
1613 if (reachable(src)) {
1614 Assembler::divsd(dst, as_Address(src));
1615 } else {
1616 lea(rscratch, src);
1617 Assembler::divsd(dst, Address(rscratch, 0));
1618 }
1619 }
1620
1621 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
1622 assert(rscratch != noreg || always_reachable(src), "missing");
1623
1624 if (reachable(src)) {
1625 Assembler::divss(dst, as_Address(src));
1626 } else {
1627 lea(rscratch, src);
1628 Assembler::divss(dst, Address(rscratch, 0));
1629 }
1630 }
1631
1632 void MacroAssembler::enter() {
1633 push(rbp);
1634 mov(rbp, rsp);
1635 }
1636
1637 void MacroAssembler::post_call_nop() {
1638 if (!Continuations::enabled()) {
1639 return;
1640 }
1641 InstructionMark im(this);
1642 relocate(post_call_nop_Relocation::spec());
1643 InlineSkippedInstructionsCounter skipCounter(this);
1644 emit_int8((uint8_t)0x0f);
1645 emit_int8((uint8_t)0x1f);
1646 emit_int8((uint8_t)0x84);
1647 emit_int8((uint8_t)0x00);
1648 emit_int32(0x00);
1649 }
1650
1651 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1652 assert(rscratch != noreg || always_reachable(src), "missing");
1653 if (reachable(src)) {
1654 Assembler::mulpd(dst, as_Address(src));
1655 } else {
1656 lea(rscratch, src);
1657 Assembler::mulpd(dst, Address(rscratch, 0));
1658 }
1659 }
1660
1661 // dst = c = a * b + c
1662 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
1663 Assembler::vfmadd231sd(c, a, b);
1664 if (dst != c) {
1665 movdbl(dst, c);
1666 }
1667 }
1668
1669 // dst = c = a * b + c
1670 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
1671 Assembler::vfmadd231ss(c, a, b);
1672 if (dst != c) {
1673 movflt(dst, c);
1674 }
1675 }
1676
1677 // dst = c = a * b + c
1678 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
1679 Assembler::vfmadd231pd(c, a, b, vector_len);
1680 if (dst != c) {
1681 vmovdqu(dst, c);
1682 }
1683 }
1684
1685 // dst = c = a * b + c
1686 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
1687 Assembler::vfmadd231ps(c, a, b, vector_len);
1688 if (dst != c) {
1689 vmovdqu(dst, c);
1690 }
1691 }
1692
1693 // dst = c = a * b + c
1694 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
1695 Assembler::vfmadd231pd(c, a, b, vector_len);
1696 if (dst != c) {
1697 vmovdqu(dst, c);
1698 }
1699 }
1700
1701 // dst = c = a * b + c
1702 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
1703 Assembler::vfmadd231ps(c, a, b, vector_len);
1704 if (dst != c) {
1705 vmovdqu(dst, c);
1706 }
1707 }
1708
1709 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
1710 assert(rscratch != noreg || always_reachable(dst), "missing");
1711
1712 if (reachable(dst)) {
1713 incrementl(as_Address(dst));
1714 } else {
1715 lea(rscratch, dst);
1716 incrementl(Address(rscratch, 0));
1717 }
1718 }
1719
1720 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
1721 incrementl(as_Address(dst, rscratch));
1722 }
1723
1724 void MacroAssembler::incrementl(Register reg, int value) {
1725 if (value == min_jint) {addl(reg, value) ; return; }
1726 if (value < 0) { decrementl(reg, -value); return; }
1727 if (value == 0) { ; return; }
1728 if (value == 1 && UseIncDec) { incl(reg) ; return; }
1729 /* else */ { addl(reg, value) ; return; }
1730 }
1731
1732 void MacroAssembler::incrementl(Address dst, int value) {
1733 if (value == min_jint) {addl(dst, value) ; return; }
1734 if (value < 0) { decrementl(dst, -value); return; }
1735 if (value == 0) { ; return; }
1736 if (value == 1 && UseIncDec) { incl(dst) ; return; }
1737 /* else */ { addl(dst, value) ; return; }
1738 }
1739
1740 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
1741 assert(rscratch != noreg || always_reachable(dst), "missing");
1742 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
1743 if (reachable(dst)) {
1744 jmp_literal(dst.target(), dst.rspec());
1745 } else {
1746 lea(rscratch, dst);
1747 jmp(rscratch);
1748 }
1749 }
1750
1751 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
1752 assert(rscratch != noreg || always_reachable(dst), "missing");
1753 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
1754 if (reachable(dst)) {
1755 InstructionMark im(this);
1756 relocate(dst.reloc());
1757 const int short_size = 2;
1758 const int long_size = 6;
1759 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
1760 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
1761 // 0111 tttn #8-bit disp
1762 emit_int8(0x70 | cc);
1763 emit_int8((offs - short_size) & 0xFF);
1764 } else {
1765 // 0000 1111 1000 tttn #32-bit disp
1766 emit_int8(0x0F);
1767 emit_int8((unsigned char)(0x80 | cc));
1768 emit_int32(offs - long_size);
1769 }
1770 } else {
1771 #ifdef ASSERT
1772 warning("reversing conditional branch");
1773 #endif /* ASSERT */
1774 Label skip;
1775 jccb(reverse[cc], skip);
1776 lea(rscratch, dst);
1777 Assembler::jmp(rscratch);
1778 bind(skip);
1779 }
1780 }
1781
1782 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
1783 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
1784 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
1785
1786 stmxcsr(mxcsr_save);
1787 movl(tmp, mxcsr_save);
1788 if (EnableX86ECoreOpts) {
1789 // The mxcsr_std has status bits set for performance on ECore
1790 orl(tmp, 0x003f);
1791 } else {
1792 // Mask out status bits (only check control and mask bits)
1793 andl(tmp, 0xFFC0);
1794 }
1795 cmp32(tmp, mxcsr_std, rscratch);
1796 }
1797
1798 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
1799 assert(rscratch != noreg || always_reachable(src), "missing");
1800
1801 if (reachable(src)) {
1802 Assembler::ldmxcsr(as_Address(src));
1803 } else {
1804 lea(rscratch, src);
1805 Assembler::ldmxcsr(Address(rscratch, 0));
1806 }
1807 }
1808
1809 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1810 int off = offset();
1811 movsbl(dst, src); // movsxb
1812 return off;
1813 }
1814
1815 // Note: load_signed_short used to be called load_signed_word.
1816 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
1817 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
1818 // The term "word" in HotSpot means a 32- or 64-bit machine word.
1819 int MacroAssembler::load_signed_short(Register dst, Address src) {
1820 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
1821 // version but this is what 64bit has always done. This seems to imply
1822 // that users are only using 32bits worth.
1823 int off = offset();
1824 movswl(dst, src); // movsxw
1825 return off;
1826 }
1827
1828 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
1829 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
1830 // and "3.9 Partial Register Penalties", p. 22).
1831 int off = offset();
1832 movzbl(dst, src); // movzxb
1833 return off;
1834 }
1835
1836 // Note: load_unsigned_short used to be called load_unsigned_word.
1837 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1838 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
1839 // and "3.9 Partial Register Penalties", p. 22).
1840 int off = offset();
1841 movzwl(dst, src); // movzxw
1842 return off;
1843 }
1844
1845 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1846 switch (size_in_bytes) {
1847 case 8: movq(dst, src); break;
1848 case 4: movl(dst, src); break;
1849 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1850 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1851 default: ShouldNotReachHere();
1852 }
1853 }
1854
1855 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1856 switch (size_in_bytes) {
1857 case 8: movq(dst, src); break;
1858 case 4: movl(dst, src); break;
1859 case 2: movw(dst, src); break;
1860 case 1: movb(dst, src); break;
1861 default: ShouldNotReachHere();
1862 }
1863 }
1864
1865 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
1866 assert(rscratch != noreg || always_reachable(dst), "missing");
1867
1868 if (reachable(dst)) {
1869 movl(as_Address(dst), src);
1870 } else {
1871 lea(rscratch, dst);
1872 movl(Address(rscratch, 0), src);
1873 }
1874 }
1875
1876 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
1877 if (reachable(src)) {
1878 movl(dst, as_Address(src));
1879 } else {
1880 lea(dst, src);
1881 movl(dst, Address(dst, 0));
1882 }
1883 }
1884
1885 // C++ bool manipulation
1886
1887 void MacroAssembler::movbool(Register dst, Address src) {
1888 if(sizeof(bool) == 1)
1889 movb(dst, src);
1890 else if(sizeof(bool) == 2)
1891 movw(dst, src);
1892 else if(sizeof(bool) == 4)
1893 movl(dst, src);
1894 else
1895 // unsupported
1896 ShouldNotReachHere();
1897 }
1898
1899 void MacroAssembler::movbool(Address dst, bool boolconst) {
1900 if(sizeof(bool) == 1)
1901 movb(dst, (int) boolconst);
1902 else if(sizeof(bool) == 2)
1903 movw(dst, (int) boolconst);
1904 else if(sizeof(bool) == 4)
1905 movl(dst, (int) boolconst);
1906 else
1907 // unsupported
1908 ShouldNotReachHere();
1909 }
1910
1911 void MacroAssembler::movbool(Address dst, Register src) {
1912 if(sizeof(bool) == 1)
1913 movb(dst, src);
1914 else if(sizeof(bool) == 2)
1915 movw(dst, src);
1916 else if(sizeof(bool) == 4)
1917 movl(dst, src);
1918 else
1919 // unsupported
1920 ShouldNotReachHere();
1921 }
1922
1923 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1924 assert(rscratch != noreg || always_reachable(src), "missing");
1925
1926 if (reachable(src)) {
1927 movdl(dst, as_Address(src));
1928 } else {
1929 lea(rscratch, src);
1930 movdl(dst, Address(rscratch, 0));
1931 }
1932 }
1933
1934 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
1935 assert(rscratch != noreg || always_reachable(src), "missing");
1936
1937 if (reachable(src)) {
1938 movq(dst, as_Address(src));
1939 } else {
1940 lea(rscratch, src);
1941 movq(dst, Address(rscratch, 0));
1942 }
1943 }
1944
1945 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1946 assert(rscratch != noreg || always_reachable(src), "missing");
1947
1948 if (reachable(src)) {
1949 if (UseXmmLoadAndClearUpper) {
1950 movsd (dst, as_Address(src));
1951 } else {
1952 movlpd(dst, as_Address(src));
1953 }
1954 } else {
1955 lea(rscratch, src);
1956 if (UseXmmLoadAndClearUpper) {
1957 movsd (dst, Address(rscratch, 0));
1958 } else {
1959 movlpd(dst, Address(rscratch, 0));
1960 }
1961 }
1962 }
1963
1964 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
1965 assert(rscratch != noreg || always_reachable(src), "missing");
1966
1967 if (reachable(src)) {
1968 movss(dst, as_Address(src));
1969 } else {
1970 lea(rscratch, src);
1971 movss(dst, Address(rscratch, 0));
1972 }
1973 }
1974
1975 void MacroAssembler::movhlf(XMMRegister dst, XMMRegister src, Register rscratch) {
1976 if (VM_Version::supports_avx10_2()) {
1977 evmovw(dst, src);
1978 } else {
1979 assert(rscratch != noreg, "missing");
1980 evmovw(rscratch, src);
1981 evmovw(dst, rscratch);
1982 }
1983 }
1984
1985 void MacroAssembler::mov64(Register dst, int64_t imm64) {
1986 if (is_uimm32(imm64)) {
1987 movl(dst, checked_cast<uint32_t>(imm64));
1988 } else if (is_simm32(imm64)) {
1989 movq(dst, checked_cast<int32_t>(imm64));
1990 } else {
1991 Assembler::mov64(dst, imm64);
1992 }
1993 }
1994
1995 void MacroAssembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) {
1996 Assembler::mov64(dst, imm64, rtype, format);
1997 }
1998
1999 void MacroAssembler::movptr(Register dst, Register src) {
2000 movq(dst, src);
2001 }
2002
2003 void MacroAssembler::movptr(Register dst, Address src) {
2004 movq(dst, src);
2005 }
2006
2007 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
2008 void MacroAssembler::movptr(Register dst, intptr_t src) {
2009 mov64(dst, src);
2010 }
2011
2012 void MacroAssembler::movptr(Address dst, Register src) {
2013 movq(dst, src);
2014 }
2015
2016 void MacroAssembler::movptr(Address dst, int32_t src) {
2017 movslq(dst, src);
2018 }
2019
2020 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
2021 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2022 Assembler::movdqu(dst, src);
2023 }
2024
2025 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
2026 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2027 Assembler::movdqu(dst, src);
2028 }
2029
2030 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
2031 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2032 Assembler::movdqu(dst, src);
2033 }
2034
2035 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2036 assert(rscratch != noreg || always_reachable(src), "missing");
2037
2038 if (reachable(src)) {
2039 movdqu(dst, as_Address(src));
2040 } else {
2041 lea(rscratch, src);
2042 movdqu(dst, Address(rscratch, 0));
2043 }
2044 }
2045
2046 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
2047 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2048 Assembler::vmovdqu(dst, src);
2049 }
2050
2051 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
2052 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2053 Assembler::vmovdqu(dst, src);
2054 }
2055
2056 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
2057 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2058 Assembler::vmovdqu(dst, src);
2059 }
2060
2061 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2062 assert(rscratch != noreg || always_reachable(src), "missing");
2063
2064 if (reachable(src)) {
2065 vmovdqu(dst, as_Address(src));
2066 }
2067 else {
2068 lea(rscratch, src);
2069 vmovdqu(dst, Address(rscratch, 0));
2070 }
2071 }
2072
2073 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2074 assert(rscratch != noreg || always_reachable(src), "missing");
2075
2076 if (vector_len == AVX_512bit) {
2077 evmovdquq(dst, src, AVX_512bit, rscratch);
2078 } else if (vector_len == AVX_256bit) {
2079 vmovdqu(dst, src, rscratch);
2080 } else {
2081 movdqu(dst, src, rscratch);
2082 }
2083 }
2084
2085 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
2086 if (vector_len == AVX_512bit) {
2087 evmovdquq(dst, src, AVX_512bit);
2088 } else if (vector_len == AVX_256bit) {
2089 vmovdqu(dst, src);
2090 } else {
2091 movdqu(dst, src);
2092 }
2093 }
2094
2095 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
2096 if (vector_len == AVX_512bit) {
2097 evmovdquq(dst, src, AVX_512bit);
2098 } else if (vector_len == AVX_256bit) {
2099 vmovdqu(dst, src);
2100 } else {
2101 movdqu(dst, src);
2102 }
2103 }
2104
2105 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
2106 if (vector_len == AVX_512bit) {
2107 evmovdquq(dst, src, AVX_512bit);
2108 } else if (vector_len == AVX_256bit) {
2109 vmovdqu(dst, src);
2110 } else {
2111 movdqu(dst, src);
2112 }
2113 }
2114
2115 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2116 assert(rscratch != noreg || always_reachable(src), "missing");
2117
2118 if (reachable(src)) {
2119 vmovdqa(dst, as_Address(src));
2120 }
2121 else {
2122 lea(rscratch, src);
2123 vmovdqa(dst, Address(rscratch, 0));
2124 }
2125 }
2126
2127 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2128 assert(rscratch != noreg || always_reachable(src), "missing");
2129
2130 if (vector_len == AVX_512bit) {
2131 evmovdqaq(dst, src, AVX_512bit, rscratch);
2132 } else if (vector_len == AVX_256bit) {
2133 vmovdqa(dst, src, rscratch);
2134 } else {
2135 movdqa(dst, src, rscratch);
2136 }
2137 }
2138
2139 void MacroAssembler::kmov(KRegister dst, Address src) {
2140 if (VM_Version::supports_avx512bw()) {
2141 kmovql(dst, src);
2142 } else {
2143 assert(VM_Version::supports_evex(), "");
2144 kmovwl(dst, src);
2145 }
2146 }
2147
2148 void MacroAssembler::kmov(Address dst, KRegister src) {
2149 if (VM_Version::supports_avx512bw()) {
2150 kmovql(dst, src);
2151 } else {
2152 assert(VM_Version::supports_evex(), "");
2153 kmovwl(dst, src);
2154 }
2155 }
2156
2157 void MacroAssembler::kmov(KRegister dst, KRegister src) {
2158 if (VM_Version::supports_avx512bw()) {
2159 kmovql(dst, src);
2160 } else {
2161 assert(VM_Version::supports_evex(), "");
2162 kmovwl(dst, src);
2163 }
2164 }
2165
2166 void MacroAssembler::kmov(Register dst, KRegister src) {
2167 if (VM_Version::supports_avx512bw()) {
2168 kmovql(dst, src);
2169 } else {
2170 assert(VM_Version::supports_evex(), "");
2171 kmovwl(dst, src);
2172 }
2173 }
2174
2175 void MacroAssembler::kmov(KRegister dst, Register src) {
2176 if (VM_Version::supports_avx512bw()) {
2177 kmovql(dst, src);
2178 } else {
2179 assert(VM_Version::supports_evex(), "");
2180 kmovwl(dst, src);
2181 }
2182 }
2183
2184 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
2185 assert(rscratch != noreg || always_reachable(src), "missing");
2186
2187 if (reachable(src)) {
2188 kmovql(dst, as_Address(src));
2189 } else {
2190 lea(rscratch, src);
2191 kmovql(dst, Address(rscratch, 0));
2192 }
2193 }
2194
2195 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
2196 assert(rscratch != noreg || always_reachable(src), "missing");
2197
2198 if (reachable(src)) {
2199 kmovwl(dst, as_Address(src));
2200 } else {
2201 lea(rscratch, src);
2202 kmovwl(dst, Address(rscratch, 0));
2203 }
2204 }
2205
2206 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2207 int vector_len, Register rscratch) {
2208 assert(rscratch != noreg || always_reachable(src), "missing");
2209
2210 if (reachable(src)) {
2211 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
2212 } else {
2213 lea(rscratch, src);
2214 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
2215 }
2216 }
2217
2218 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2219 int vector_len, Register rscratch) {
2220 assert(rscratch != noreg || always_reachable(src), "missing");
2221
2222 if (reachable(src)) {
2223 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
2224 } else {
2225 lea(rscratch, src);
2226 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
2227 }
2228 }
2229
2230 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2231 assert(rscratch != noreg || always_reachable(src), "missing");
2232
2233 if (reachable(src)) {
2234 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
2235 } else {
2236 lea(rscratch, src);
2237 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
2238 }
2239 }
2240
2241 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2242 assert(rscratch != noreg || always_reachable(src), "missing");
2243
2244 if (reachable(src)) {
2245 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
2246 } else {
2247 lea(rscratch, src);
2248 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
2249 }
2250 }
2251
2252 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2253 assert(rscratch != noreg || always_reachable(src), "missing");
2254
2255 if (reachable(src)) {
2256 Assembler::evmovdquq(dst, as_Address(src), vector_len);
2257 } else {
2258 lea(rscratch, src);
2259 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
2260 }
2261 }
2262
2263 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2264 assert(rscratch != noreg || always_reachable(src), "missing");
2265
2266 if (reachable(src)) {
2267 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
2268 } else {
2269 lea(rscratch, src);
2270 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
2271 }
2272 }
2273
2274 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2275 assert(rscratch != noreg || always_reachable(src), "missing");
2276
2277 if (reachable(src)) {
2278 Assembler::evmovdqaq(dst, as_Address(src), vector_len);
2279 } else {
2280 lea(rscratch, src);
2281 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
2282 }
2283 }
2284
2285 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2286 assert(rscratch != noreg || always_reachable(src), "missing");
2287
2288 if (reachable(src)) {
2289 Assembler::movapd(dst, as_Address(src));
2290 } else {
2291 lea(rscratch, src);
2292 Assembler::movapd(dst, Address(rscratch, 0));
2293 }
2294 }
2295
2296 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2297 assert(rscratch != noreg || always_reachable(src), "missing");
2298
2299 if (reachable(src)) {
2300 Assembler::movdqa(dst, as_Address(src));
2301 } else {
2302 lea(rscratch, src);
2303 Assembler::movdqa(dst, Address(rscratch, 0));
2304 }
2305 }
2306
2307 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2308 assert(rscratch != noreg || always_reachable(src), "missing");
2309
2310 if (reachable(src)) {
2311 Assembler::movsd(dst, as_Address(src));
2312 } else {
2313 lea(rscratch, src);
2314 Assembler::movsd(dst, Address(rscratch, 0));
2315 }
2316 }
2317
2318 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2319 assert(rscratch != noreg || always_reachable(src), "missing");
2320
2321 if (reachable(src)) {
2322 Assembler::movss(dst, as_Address(src));
2323 } else {
2324 lea(rscratch, src);
2325 Assembler::movss(dst, Address(rscratch, 0));
2326 }
2327 }
2328
2329 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
2330 assert(rscratch != noreg || always_reachable(src), "missing");
2331
2332 if (reachable(src)) {
2333 Assembler::movddup(dst, as_Address(src));
2334 } else {
2335 lea(rscratch, src);
2336 Assembler::movddup(dst, Address(rscratch, 0));
2337 }
2338 }
2339
2340 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2341 assert(rscratch != noreg || always_reachable(src), "missing");
2342
2343 if (reachable(src)) {
2344 Assembler::vmovddup(dst, as_Address(src), vector_len);
2345 } else {
2346 lea(rscratch, src);
2347 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
2348 }
2349 }
2350
2351 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2352 assert(rscratch != noreg || always_reachable(src), "missing");
2353
2354 if (reachable(src)) {
2355 Assembler::mulsd(dst, as_Address(src));
2356 } else {
2357 lea(rscratch, src);
2358 Assembler::mulsd(dst, Address(rscratch, 0));
2359 }
2360 }
2361
2362 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2363 assert(rscratch != noreg || always_reachable(src), "missing");
2364
2365 if (reachable(src)) {
2366 Assembler::mulss(dst, as_Address(src));
2367 } else {
2368 lea(rscratch, src);
2369 Assembler::mulss(dst, Address(rscratch, 0));
2370 }
2371 }
2372
2373 void MacroAssembler::null_check(Register reg, int offset) {
2374 if (needs_explicit_null_check(offset)) {
2375 // provoke OS null exception if reg is null by
2376 // accessing M[reg] w/o changing any (non-CC) registers
2377 // NOTE: cmpl is plenty here to provoke a segv
2378 cmpptr(rax, Address(reg, 0));
2379 // Note: should probably use testl(rax, Address(reg, 0));
2380 // may be shorter code (however, this version of
2381 // testl needs to be implemented first)
2382 } else {
2383 // nothing to do, (later) access of M[reg + offset]
2384 // will provoke OS null exception if reg is null
2385 }
2386 }
2387
2388 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2389 andptr(markword, markWord::inline_type_pattern_mask);
2390 cmpptr(markword, markWord::inline_type_pattern);
2391 jcc(Assembler::equal, is_inline_type);
2392 }
2393
2394 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
2395 if (can_be_null) {
2396 testptr(object, object);
2397 jcc(Assembler::zero, not_inline_type);
2398 }
2399 const int is_inline_type_mask = markWord::inline_type_pattern;
2400 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2401 andptr(tmp, is_inline_type_mask);
2402 cmpptr(tmp, is_inline_type_mask);
2403 jcc(Assembler::notEqual, not_inline_type);
2404 }
2405
2406 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2407 movl(temp_reg, flags);
2408 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2409 jcc(Assembler::notEqual, is_null_free_inline_type);
2410 }
2411
2412 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2413 movl(temp_reg, flags);
2414 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2415 jcc(Assembler::equal, not_null_free_inline_type);
2416 }
2417
2418 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2419 movl(temp_reg, flags);
2420 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
2421 jcc(Assembler::notEqual, is_flat);
2422 }
2423
2424 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2425 movl(temp_reg, flags);
2426 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
2427 jcc(Assembler::notEqual, has_null_marker);
2428 }
2429
2430 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2431 Label test_mark_word;
2432 // load mark word
2433 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
2434 // check displaced
2435 testl(temp_reg, markWord::unlocked_value);
2436 jccb(Assembler::notZero, test_mark_word);
2437 // slow path use klass prototype
2438 push(rscratch1);
2439 load_prototype_header(temp_reg, oop, rscratch1);
2440 pop(rscratch1);
2441
2442 bind(test_mark_word);
2443 testl(temp_reg, test_bit);
2444 jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
2445 }
2446
2447 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
2448 Label& is_flat_array) {
2449 #ifdef _LP64
2450 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2451 #else
2452 load_klass(temp_reg, oop, noreg);
2453 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2454 test_flat_array_layout(temp_reg, is_flat_array);
2455 #endif
2456 }
2457
2458 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
2459 Label& is_non_flat_array) {
2460 #ifdef _LP64
2461 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2462 #else
2463 load_klass(temp_reg, oop, noreg);
2464 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2465 test_non_flat_array_layout(temp_reg, is_non_flat_array);
2466 #endif
2467 }
2468
2469 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
2470 #ifdef _LP64
2471 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2472 #else
2473 Unimplemented();
2474 #endif
2475 }
2476
2477 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
2478 #ifdef _LP64
2479 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2480 #else
2481 Unimplemented();
2482 #endif
2483 }
2484
2485 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2486 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2487 jcc(Assembler::notZero, is_flat_array);
2488 }
2489
2490 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2491 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2492 jcc(Assembler::zero, is_non_flat_array);
2493 }
2494
2495 void MacroAssembler::os_breakpoint() {
2496 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
2497 // (e.g., MSVC can't call ps() otherwise)
2498 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
2499 }
2500
2501 void MacroAssembler::unimplemented(const char* what) {
2502 const char* buf = nullptr;
2503 {
2504 ResourceMark rm;
2505 stringStream ss;
2506 ss.print("unimplemented: %s", what);
2507 buf = code_string(ss.as_string());
2508 }
2509 stop(buf);
2510 }
2511
2512 #define XSTATE_BV 0x200
2513
2514 void MacroAssembler::pop_CPU_state() {
2515 pop_FPU_state();
2516 pop_IU_state();
2517 }
2518
2519 void MacroAssembler::pop_FPU_state() {
2520 fxrstor(Address(rsp, 0));
2521 addptr(rsp, FPUStateSizeInWords * wordSize);
2522 }
2523
2524 void MacroAssembler::pop_IU_state() {
2525 popa();
2526 addq(rsp, 8);
2527 popf();
2528 }
2529
2530 // Save Integer and Float state
2531 // Warning: Stack must be 16 byte aligned (64bit)
2532 void MacroAssembler::push_CPU_state() {
2533 push_IU_state();
2534 push_FPU_state();
2535 }
2536
2537 void MacroAssembler::push_FPU_state() {
2538 subptr(rsp, FPUStateSizeInWords * wordSize);
2539 fxsave(Address(rsp, 0));
2540 }
2541
2542 void MacroAssembler::push_IU_state() {
2543 // Push flags first because pusha kills them
2544 pushf();
2545 // Make sure rsp stays 16-byte aligned
2546 subq(rsp, 8);
2547 pusha();
2548 }
2549
2550 void MacroAssembler::push_cont_fastpath() {
2551 if (!Continuations::enabled()) return;
2552
2553 Label L_done;
2554 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2555 jccb(Assembler::belowEqual, L_done);
2556 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
2557 bind(L_done);
2558 }
2559
2560 void MacroAssembler::pop_cont_fastpath() {
2561 if (!Continuations::enabled()) return;
2562
2563 Label L_done;
2564 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2565 jccb(Assembler::below, L_done);
2566 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
2567 bind(L_done);
2568 }
2569
2570 #ifdef ASSERT
2571 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
2572 Label no_cont;
2573 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
2574 testl(cont, cont);
2575 jcc(Assembler::zero, no_cont);
2576 stop(name);
2577 bind(no_cont);
2578 }
2579 #endif
2580
2581 void MacroAssembler::reset_last_Java_frame(bool clear_fp) { // determine java_thread register
2582 // we must set sp to zero to clear frame
2583 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
2584 // must clear fp, so that compiled frames are not confused; it is
2585 // possible that we need it only for debugging
2586 if (clear_fp) {
2587 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
2588 }
2589 // Always clear the pc because it could have been set by make_walkable()
2590 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
2591 vzeroupper();
2592 }
2593
2594 void MacroAssembler::round_to(Register reg, int modulus) {
2595 addptr(reg, modulus - 1);
2596 andptr(reg, -modulus);
2597 }
2598
2599 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
2600 if (at_return) {
2601 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
2602 // we may safely use rsp instead to perform the stack watermark check.
2603 cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
2604 jcc(Assembler::above, slow_path);
2605 return;
2606 }
2607 testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2608 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
2609 }
2610
2611 // Calls to C land
2612 //
2613 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded
2614 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
2615 // has to be reset to 0. This is required to allow proper stack traversal.
2616 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2617 Register last_java_fp,
2618 address last_java_pc,
2619 Register rscratch) {
2620 vzeroupper();
2621 // determine last_java_sp register
2622 if (!last_java_sp->is_valid()) {
2623 last_java_sp = rsp;
2624 }
2625 // last_java_fp is optional
2626 if (last_java_fp->is_valid()) {
2627 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
2628 }
2629 // last_java_pc is optional
2630 if (last_java_pc != nullptr) {
2631 Address java_pc(r15_thread,
2632 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
2633 lea(java_pc, InternalAddress(last_java_pc), rscratch);
2634 }
2635 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
2636 }
2637
2638 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2639 Register last_java_fp,
2640 Label &L,
2641 Register scratch) {
2642 lea(scratch, L);
2643 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
2644 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
2645 }
2646
2647 void MacroAssembler::shlptr(Register dst, int imm8) {
2648 shlq(dst, imm8);
2649 }
2650
2651 void MacroAssembler::shrptr(Register dst, int imm8) {
2652 shrq(dst, imm8);
2653 }
2654
2655 void MacroAssembler::sign_extend_byte(Register reg) {
2656 movsbl(reg, reg); // movsxb
2657 }
2658
2659 void MacroAssembler::sign_extend_short(Register reg) {
2660 movswl(reg, reg); // movsxw
2661 }
2662
2663 void MacroAssembler::narrow_subword_type(Register reg, BasicType bt) {
2664 assert(is_subword_type(bt), "required");
2665 switch (bt) {
2666 case T_BOOLEAN: andl(reg, 1); break;
2667 case T_BYTE: movsbl(reg, reg); break;
2668 case T_CHAR: movzwl(reg, reg); break;
2669 case T_SHORT: movswl(reg, reg); break;
2670 default: ShouldNotReachHere();
2671 }
2672 }
2673
2674 void MacroAssembler::testl(Address dst, int32_t imm32) {
2675 if (imm32 >= 0 && is8bit(imm32)) {
2676 testb(dst, imm32);
2677 } else {
2678 Assembler::testl(dst, imm32);
2679 }
2680 }
2681
2682 void MacroAssembler::testl(Register dst, int32_t imm32) {
2683 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
2684 testb(dst, imm32);
2685 } else {
2686 Assembler::testl(dst, imm32);
2687 }
2688 }
2689
2690 void MacroAssembler::testl(Register dst, AddressLiteral src) {
2691 assert(always_reachable(src), "Address should be reachable");
2692 testl(dst, as_Address(src));
2693 }
2694
2695 void MacroAssembler::testq(Address dst, int32_t imm32) {
2696 if (imm32 >= 0) {
2697 testl(dst, imm32);
2698 } else {
2699 Assembler::testq(dst, imm32);
2700 }
2701 }
2702
2703 void MacroAssembler::testq(Register dst, int32_t imm32) {
2704 if (imm32 >= 0) {
2705 testl(dst, imm32);
2706 } else {
2707 Assembler::testq(dst, imm32);
2708 }
2709 }
2710
2711 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
2712 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2713 Assembler::pcmpeqb(dst, src);
2714 }
2715
2716 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
2717 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2718 Assembler::pcmpeqw(dst, src);
2719 }
2720
2721 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
2722 assert((dst->encoding() < 16),"XMM register should be 0-15");
2723 Assembler::pcmpestri(dst, src, imm8);
2724 }
2725
2726 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
2727 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2728 Assembler::pcmpestri(dst, src, imm8);
2729 }
2730
2731 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2732 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2733 Assembler::pmovzxbw(dst, src);
2734 }
2735
2736 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
2737 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2738 Assembler::pmovzxbw(dst, src);
2739 }
2740
2741 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
2742 assert((src->encoding() < 16),"XMM register should be 0-15");
2743 Assembler::pmovmskb(dst, src);
2744 }
2745
2746 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
2747 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2748 Assembler::ptest(dst, src);
2749 }
2750
2751 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2752 assert(rscratch != noreg || always_reachable(src), "missing");
2753
2754 if (reachable(src)) {
2755 Assembler::sqrtss(dst, as_Address(src));
2756 } else {
2757 lea(rscratch, src);
2758 Assembler::sqrtss(dst, Address(rscratch, 0));
2759 }
2760 }
2761
2762 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2763 assert(rscratch != noreg || always_reachable(src), "missing");
2764
2765 if (reachable(src)) {
2766 Assembler::subsd(dst, as_Address(src));
2767 } else {
2768 lea(rscratch, src);
2769 Assembler::subsd(dst, Address(rscratch, 0));
2770 }
2771 }
2772
2773 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
2774 assert(rscratch != noreg || always_reachable(src), "missing");
2775
2776 if (reachable(src)) {
2777 Assembler::roundsd(dst, as_Address(src), rmode);
2778 } else {
2779 lea(rscratch, src);
2780 Assembler::roundsd(dst, Address(rscratch, 0), rmode);
2781 }
2782 }
2783
2784 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2785 assert(rscratch != noreg || always_reachable(src), "missing");
2786
2787 if (reachable(src)) {
2788 Assembler::subss(dst, as_Address(src));
2789 } else {
2790 lea(rscratch, src);
2791 Assembler::subss(dst, Address(rscratch, 0));
2792 }
2793 }
2794
2795 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2796 assert(rscratch != noreg || always_reachable(src), "missing");
2797
2798 if (reachable(src)) {
2799 Assembler::ucomisd(dst, as_Address(src));
2800 } else {
2801 lea(rscratch, src);
2802 Assembler::ucomisd(dst, Address(rscratch, 0));
2803 }
2804 }
2805
2806 void MacroAssembler::evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2807 assert(rscratch != noreg || always_reachable(src), "missing");
2808
2809 if (reachable(src)) {
2810 Assembler::evucomxsd(dst, as_Address(src));
2811 } else {
2812 lea(rscratch, src);
2813 Assembler::evucomxsd(dst, Address(rscratch, 0));
2814 }
2815 }
2816
2817 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2818 assert(rscratch != noreg || always_reachable(src), "missing");
2819
2820 if (reachable(src)) {
2821 Assembler::ucomiss(dst, as_Address(src));
2822 } else {
2823 lea(rscratch, src);
2824 Assembler::ucomiss(dst, Address(rscratch, 0));
2825 }
2826 }
2827
2828 void MacroAssembler::evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2829 assert(rscratch != noreg || always_reachable(src), "missing");
2830
2831 if (reachable(src)) {
2832 Assembler::evucomxss(dst, as_Address(src));
2833 } else {
2834 lea(rscratch, src);
2835 Assembler::evucomxss(dst, Address(rscratch, 0));
2836 }
2837 }
2838
2839 void MacroAssembler::evucomish(XMMRegister dst, AddressLiteral src, Register rscratch) {
2840 assert(rscratch != noreg || always_reachable(src), "missing");
2841
2842 if (reachable(src)) {
2843 Assembler::evucomish(dst, as_Address(src));
2844 } else {
2845 lea(rscratch, src);
2846 Assembler::evucomish(dst, Address(rscratch, 0));
2847 }
2848 }
2849
2850 void MacroAssembler::evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch) {
2851 assert(rscratch != noreg || always_reachable(src), "missing");
2852
2853 if (reachable(src)) {
2854 Assembler::evucomxsh(dst, as_Address(src));
2855 } else {
2856 lea(rscratch, src);
2857 Assembler::evucomxsh(dst, Address(rscratch, 0));
2858 }
2859 }
2860
2861 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2862 assert(rscratch != noreg || always_reachable(src), "missing");
2863
2864 // Used in sign-bit flipping with aligned address.
2865 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2866
2867 if (UseAVX > 2 &&
2868 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2869 (dst->encoding() >= 16)) {
2870 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2871 } else if (reachable(src)) {
2872 Assembler::xorpd(dst, as_Address(src));
2873 } else {
2874 lea(rscratch, src);
2875 Assembler::xorpd(dst, Address(rscratch, 0));
2876 }
2877 }
2878
2879 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
2880 if (UseAVX > 2 &&
2881 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2882 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2883 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2884 } else {
2885 Assembler::xorpd(dst, src);
2886 }
2887 }
2888
2889 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
2890 if (UseAVX > 2 &&
2891 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2892 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2893 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2894 } else {
2895 Assembler::xorps(dst, src);
2896 }
2897 }
2898
2899 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
2900 assert(rscratch != noreg || always_reachable(src), "missing");
2901
2902 // Used in sign-bit flipping with aligned address.
2903 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2904
2905 if (UseAVX > 2 &&
2906 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2907 (dst->encoding() >= 16)) {
2908 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2909 } else if (reachable(src)) {
2910 Assembler::xorps(dst, as_Address(src));
2911 } else {
2912 lea(rscratch, src);
2913 Assembler::xorps(dst, Address(rscratch, 0));
2914 }
2915 }
2916
2917 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
2918 assert(rscratch != noreg || always_reachable(src), "missing");
2919
2920 // Used in sign-bit flipping with aligned address.
2921 bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
2922 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
2923 if (reachable(src)) {
2924 Assembler::pshufb(dst, as_Address(src));
2925 } else {
2926 lea(rscratch, src);
2927 Assembler::pshufb(dst, Address(rscratch, 0));
2928 }
2929 }
2930
2931 // AVX 3-operands instructions
2932
2933 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2934 assert(rscratch != noreg || always_reachable(src), "missing");
2935
2936 if (reachable(src)) {
2937 vaddsd(dst, nds, as_Address(src));
2938 } else {
2939 lea(rscratch, src);
2940 vaddsd(dst, nds, Address(rscratch, 0));
2941 }
2942 }
2943
2944 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2945 assert(rscratch != noreg || always_reachable(src), "missing");
2946
2947 if (reachable(src)) {
2948 vaddss(dst, nds, as_Address(src));
2949 } else {
2950 lea(rscratch, src);
2951 vaddss(dst, nds, Address(rscratch, 0));
2952 }
2953 }
2954
2955 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2956 assert(UseAVX > 0, "requires some form of AVX");
2957 assert(rscratch != noreg || always_reachable(src), "missing");
2958
2959 if (reachable(src)) {
2960 Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
2961 } else {
2962 lea(rscratch, src);
2963 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
2964 }
2965 }
2966
2967 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2968 assert(UseAVX > 0, "requires some form of AVX");
2969 assert(rscratch != noreg || always_reachable(src), "missing");
2970
2971 if (reachable(src)) {
2972 Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
2973 } else {
2974 lea(rscratch, src);
2975 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
2976 }
2977 }
2978
2979 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2980 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2981 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2982
2983 vandps(dst, nds, negate_field, vector_len, rscratch);
2984 }
2985
2986 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2987 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2988 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2989
2990 vandpd(dst, nds, negate_field, vector_len, rscratch);
2991 }
2992
2993 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
2994 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2995 Assembler::vpaddb(dst, nds, src, vector_len);
2996 }
2997
2998 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
2999 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3000 Assembler::vpaddb(dst, nds, src, vector_len);
3001 }
3002
3003 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3004 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3005 Assembler::vpaddw(dst, nds, src, vector_len);
3006 }
3007
3008 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3009 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3010 Assembler::vpaddw(dst, nds, src, vector_len);
3011 }
3012
3013 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3014 assert(rscratch != noreg || always_reachable(src), "missing");
3015
3016 if (reachable(src)) {
3017 Assembler::vpand(dst, nds, as_Address(src), vector_len);
3018 } else {
3019 lea(rscratch, src);
3020 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
3021 }
3022 }
3023
3024 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3025 assert(rscratch != noreg || always_reachable(src), "missing");
3026
3027 if (reachable(src)) {
3028 Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
3029 } else {
3030 lea(rscratch, src);
3031 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
3032 }
3033 }
3034
3035 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3036 assert(rscratch != noreg || always_reachable(src), "missing");
3037
3038 if (reachable(src)) {
3039 Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
3040 } else {
3041 lea(rscratch, src);
3042 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
3043 }
3044 }
3045
3046 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3047 assert(rscratch != noreg || always_reachable(src), "missing");
3048
3049 if (reachable(src)) {
3050 Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
3051 } else {
3052 lea(rscratch, src);
3053 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
3054 }
3055 }
3056
3057 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3058 assert(rscratch != noreg || always_reachable(src), "missing");
3059
3060 if (reachable(src)) {
3061 Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
3062 } else {
3063 lea(rscratch, src);
3064 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
3065 }
3066 }
3067
3068 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
3069 assert(rscratch != noreg || always_reachable(src), "missing");
3070
3071 if (reachable(src)) {
3072 Assembler::vbroadcastss(dst, as_Address(src), vector_len);
3073 } else {
3074 lea(rscratch, src);
3075 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
3076 }
3077 }
3078
3079 // Vector float blend
3080 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3081 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3082 // WARN: Allow dst == (src1|src2), mask == scratch
3083 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3084 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3085 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
3086 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3087 if (blend_emulation && scratch_available && dst_available) {
3088 if (compute_mask) {
3089 vpsrad(scratch, mask, 32, vector_len);
3090 mask = scratch;
3091 }
3092 if (dst == src1) {
3093 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3094 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3095 } else {
3096 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3097 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3098 }
3099 vpor(dst, dst, scratch, vector_len);
3100 } else {
3101 Assembler::vblendvps(dst, src1, src2, mask, vector_len);
3102 }
3103 }
3104
3105 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3106 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3107 // WARN: Allow dst == (src1|src2), mask == scratch
3108 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3109 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3110 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
3111 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3112 if (blend_emulation && scratch_available && dst_available) {
3113 if (compute_mask) {
3114 vpxor(scratch, scratch, scratch, vector_len);
3115 vpcmpgtq(scratch, scratch, mask, vector_len);
3116 mask = scratch;
3117 }
3118 if (dst == src1) {
3119 vpandn(dst, mask, src1, vector_len); // if mask == 0, src
3120 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3121 } else {
3122 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3123 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src
3124 }
3125 vpor(dst, dst, scratch, vector_len);
3126 } else {
3127 Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
3128 }
3129 }
3130
3131 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3132 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3133 Assembler::vpcmpeqb(dst, nds, src, vector_len);
3134 }
3135
3136 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
3137 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3138 Assembler::vpcmpeqb(dst, src1, src2, vector_len);
3139 }
3140
3141 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3142 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3143 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3144 }
3145
3146 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3147 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3148 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3149 }
3150
3151 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3152 assert(rscratch != noreg || always_reachable(src), "missing");
3153
3154 if (reachable(src)) {
3155 Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
3156 } else {
3157 lea(rscratch, src);
3158 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
3159 }
3160 }
3161
3162 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3163 int comparison, bool is_signed, int vector_len, Register rscratch) {
3164 assert(rscratch != noreg || always_reachable(src), "missing");
3165
3166 if (reachable(src)) {
3167 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3168 } else {
3169 lea(rscratch, src);
3170 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3171 }
3172 }
3173
3174 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3175 int comparison, bool is_signed, int vector_len, Register rscratch) {
3176 assert(rscratch != noreg || always_reachable(src), "missing");
3177
3178 if (reachable(src)) {
3179 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3180 } else {
3181 lea(rscratch, src);
3182 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3183 }
3184 }
3185
3186 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3187 int comparison, bool is_signed, int vector_len, Register rscratch) {
3188 assert(rscratch != noreg || always_reachable(src), "missing");
3189
3190 if (reachable(src)) {
3191 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3192 } else {
3193 lea(rscratch, src);
3194 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3195 }
3196 }
3197
3198 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3199 int comparison, bool is_signed, int vector_len, Register rscratch) {
3200 assert(rscratch != noreg || always_reachable(src), "missing");
3201
3202 if (reachable(src)) {
3203 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3204 } else {
3205 lea(rscratch, src);
3206 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3207 }
3208 }
3209
3210 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
3211 if (width == Assembler::Q) {
3212 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
3213 } else {
3214 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
3215 }
3216 }
3217
3218 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
3219 int eq_cond_enc = 0x29;
3220 int gt_cond_enc = 0x37;
3221 if (width != Assembler::Q) {
3222 eq_cond_enc = 0x74 + width;
3223 gt_cond_enc = 0x64 + width;
3224 }
3225 switch (cond) {
3226 case eq:
3227 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3228 break;
3229 case neq:
3230 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3231 vallones(xtmp, vector_len);
3232 vpxor(dst, xtmp, dst, vector_len);
3233 break;
3234 case le:
3235 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3236 vallones(xtmp, vector_len);
3237 vpxor(dst, xtmp, dst, vector_len);
3238 break;
3239 case nlt:
3240 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3241 vallones(xtmp, vector_len);
3242 vpxor(dst, xtmp, dst, vector_len);
3243 break;
3244 case lt:
3245 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3246 break;
3247 case nle:
3248 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3249 break;
3250 default:
3251 assert(false, "Should not reach here");
3252 }
3253 }
3254
3255 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
3256 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3257 Assembler::vpmovzxbw(dst, src, vector_len);
3258 }
3259
3260 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
3261 assert((src->encoding() < 16),"XMM register should be 0-15");
3262 Assembler::vpmovmskb(dst, src, vector_len);
3263 }
3264
3265 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3266 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3267 Assembler::vpmullw(dst, nds, src, vector_len);
3268 }
3269
3270 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3271 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3272 Assembler::vpmullw(dst, nds, src, vector_len);
3273 }
3274
3275 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3276 assert((UseAVX > 0), "AVX support is needed");
3277 assert(rscratch != noreg || always_reachable(src), "missing");
3278
3279 if (reachable(src)) {
3280 Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
3281 } else {
3282 lea(rscratch, src);
3283 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
3284 }
3285 }
3286
3287 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3288 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3289 Assembler::vpsubb(dst, nds, src, vector_len);
3290 }
3291
3292 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3293 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3294 Assembler::vpsubb(dst, nds, src, vector_len);
3295 }
3296
3297 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3298 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3299 Assembler::vpsubw(dst, nds, src, vector_len);
3300 }
3301
3302 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3303 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3304 Assembler::vpsubw(dst, nds, src, vector_len);
3305 }
3306
3307 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3308 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3309 Assembler::vpsraw(dst, nds, shift, vector_len);
3310 }
3311
3312 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3313 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3314 Assembler::vpsraw(dst, nds, shift, vector_len);
3315 }
3316
3317 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3318 assert(UseAVX > 2,"");
3319 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3320 vector_len = 2;
3321 }
3322 Assembler::evpsraq(dst, nds, shift, vector_len);
3323 }
3324
3325 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3326 assert(UseAVX > 2,"");
3327 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3328 vector_len = 2;
3329 }
3330 Assembler::evpsraq(dst, nds, shift, vector_len);
3331 }
3332
3333 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3334 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3335 Assembler::vpsrlw(dst, nds, shift, vector_len);
3336 }
3337
3338 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3339 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3340 Assembler::vpsrlw(dst, nds, shift, vector_len);
3341 }
3342
3343 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3344 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3345 Assembler::vpsllw(dst, nds, shift, vector_len);
3346 }
3347
3348 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3349 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3350 Assembler::vpsllw(dst, nds, shift, vector_len);
3351 }
3352
3353 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
3354 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
3355 Assembler::vptest(dst, src);
3356 }
3357
3358 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
3359 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3360 Assembler::punpcklbw(dst, src);
3361 }
3362
3363 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
3364 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
3365 Assembler::pshufd(dst, src, mode);
3366 }
3367
3368 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
3369 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3370 Assembler::pshuflw(dst, src, mode);
3371 }
3372
3373 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3374 assert(rscratch != noreg || always_reachable(src), "missing");
3375
3376 if (reachable(src)) {
3377 vandpd(dst, nds, as_Address(src), vector_len);
3378 } else {
3379 lea(rscratch, src);
3380 vandpd(dst, nds, Address(rscratch, 0), vector_len);
3381 }
3382 }
3383
3384 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3385 assert(rscratch != noreg || always_reachable(src), "missing");
3386
3387 if (reachable(src)) {
3388 vandps(dst, nds, as_Address(src), vector_len);
3389 } else {
3390 lea(rscratch, src);
3391 vandps(dst, nds, Address(rscratch, 0), vector_len);
3392 }
3393 }
3394
3395 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
3396 bool merge, int vector_len, Register rscratch) {
3397 assert(rscratch != noreg || always_reachable(src), "missing");
3398
3399 if (reachable(src)) {
3400 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
3401 } else {
3402 lea(rscratch, src);
3403 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
3404 }
3405 }
3406
3407 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3408 assert(rscratch != noreg || always_reachable(src), "missing");
3409
3410 if (reachable(src)) {
3411 vdivsd(dst, nds, as_Address(src));
3412 } else {
3413 lea(rscratch, src);
3414 vdivsd(dst, nds, Address(rscratch, 0));
3415 }
3416 }
3417
3418 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3419 assert(rscratch != noreg || always_reachable(src), "missing");
3420
3421 if (reachable(src)) {
3422 vdivss(dst, nds, as_Address(src));
3423 } else {
3424 lea(rscratch, src);
3425 vdivss(dst, nds, Address(rscratch, 0));
3426 }
3427 }
3428
3429 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3430 assert(rscratch != noreg || always_reachable(src), "missing");
3431
3432 if (reachable(src)) {
3433 vmulsd(dst, nds, as_Address(src));
3434 } else {
3435 lea(rscratch, src);
3436 vmulsd(dst, nds, Address(rscratch, 0));
3437 }
3438 }
3439
3440 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3441 assert(rscratch != noreg || always_reachable(src), "missing");
3442
3443 if (reachable(src)) {
3444 vmulss(dst, nds, as_Address(src));
3445 } else {
3446 lea(rscratch, src);
3447 vmulss(dst, nds, Address(rscratch, 0));
3448 }
3449 }
3450
3451 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3452 assert(rscratch != noreg || always_reachable(src), "missing");
3453
3454 if (reachable(src)) {
3455 vsubsd(dst, nds, as_Address(src));
3456 } else {
3457 lea(rscratch, src);
3458 vsubsd(dst, nds, Address(rscratch, 0));
3459 }
3460 }
3461
3462 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3463 assert(rscratch != noreg || always_reachable(src), "missing");
3464
3465 if (reachable(src)) {
3466 vsubss(dst, nds, as_Address(src));
3467 } else {
3468 lea(rscratch, src);
3469 vsubss(dst, nds, Address(rscratch, 0));
3470 }
3471 }
3472
3473 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3474 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3475 assert(rscratch != noreg || always_reachable(src), "missing");
3476
3477 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
3478 }
3479
3480 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3481 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3482 assert(rscratch != noreg || always_reachable(src), "missing");
3483
3484 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
3485 }
3486
3487 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3488 assert(rscratch != noreg || always_reachable(src), "missing");
3489
3490 if (reachable(src)) {
3491 vxorpd(dst, nds, as_Address(src), vector_len);
3492 } else {
3493 lea(rscratch, src);
3494 vxorpd(dst, nds, Address(rscratch, 0), vector_len);
3495 }
3496 }
3497
3498 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3499 assert(rscratch != noreg || always_reachable(src), "missing");
3500
3501 if (reachable(src)) {
3502 vxorps(dst, nds, as_Address(src), vector_len);
3503 } else {
3504 lea(rscratch, src);
3505 vxorps(dst, nds, Address(rscratch, 0), vector_len);
3506 }
3507 }
3508
3509 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3510 assert(rscratch != noreg || always_reachable(src), "missing");
3511
3512 if (UseAVX > 1 || (vector_len < 1)) {
3513 if (reachable(src)) {
3514 Assembler::vpxor(dst, nds, as_Address(src), vector_len);
3515 } else {
3516 lea(rscratch, src);
3517 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
3518 }
3519 } else {
3520 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
3521 }
3522 }
3523
3524 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3525 assert(rscratch != noreg || always_reachable(src), "missing");
3526
3527 if (reachable(src)) {
3528 Assembler::vpermd(dst, nds, as_Address(src), vector_len);
3529 } else {
3530 lea(rscratch, src);
3531 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
3532 }
3533 }
3534
3535 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
3536 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
3537 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
3538 // The inverted mask is sign-extended
3539 andptr(possibly_non_local, inverted_mask);
3540 }
3541
3542 void MacroAssembler::resolve_jobject(Register value,
3543 Register tmp) {
3544 Register thread = r15_thread;
3545 assert_different_registers(value, thread, tmp);
3546 Label done, tagged, weak_tagged;
3547 testptr(value, value);
3548 jcc(Assembler::zero, done); // Use null as-is.
3549 testptr(value, JNIHandles::tag_mask); // Test for tag.
3550 jcc(Assembler::notZero, tagged);
3551
3552 // Resolve local handle
3553 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
3554 verify_oop(value);
3555 jmp(done);
3556
3557 bind(tagged);
3558 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
3559 jcc(Assembler::notZero, weak_tagged);
3560
3561 // Resolve global handle
3562 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3563 verify_oop(value);
3564 jmp(done);
3565
3566 bind(weak_tagged);
3567 // Resolve jweak.
3568 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3569 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
3570 verify_oop(value);
3571
3572 bind(done);
3573 }
3574
3575 void MacroAssembler::resolve_global_jobject(Register value,
3576 Register tmp) {
3577 Register thread = r15_thread;
3578 assert_different_registers(value, thread, tmp);
3579 Label done;
3580
3581 testptr(value, value);
3582 jcc(Assembler::zero, done); // Use null as-is.
3583
3584 #ifdef ASSERT
3585 {
3586 Label valid_global_tag;
3587 testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
3588 jcc(Assembler::notZero, valid_global_tag);
3589 stop("non global jobject using resolve_global_jobject");
3590 bind(valid_global_tag);
3591 }
3592 #endif
3593
3594 // Resolve global handle
3595 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3596 verify_oop(value);
3597
3598 bind(done);
3599 }
3600
3601 void MacroAssembler::subptr(Register dst, int32_t imm32) {
3602 subq(dst, imm32);
3603 }
3604
3605 // Force generation of a 4 byte immediate value even if it fits into 8bit
3606 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
3607 subq_imm32(dst, imm32);
3608 }
3609
3610 void MacroAssembler::subptr(Register dst, Register src) {
3611 subq(dst, src);
3612 }
3613
3614 // C++ bool manipulation
3615 void MacroAssembler::testbool(Register dst) {
3616 if(sizeof(bool) == 1)
3617 testb(dst, 0xff);
3618 else if(sizeof(bool) == 2) {
3619 // testw implementation needed for two byte bools
3620 ShouldNotReachHere();
3621 } else if(sizeof(bool) == 4)
3622 testl(dst, dst);
3623 else
3624 // unsupported
3625 ShouldNotReachHere();
3626 }
3627
3628 void MacroAssembler::testptr(Register dst, Register src) {
3629 testq(dst, src);
3630 }
3631
3632 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
3633 void MacroAssembler::tlab_allocate(Register obj,
3634 Register var_size_in_bytes,
3635 int con_size_in_bytes,
3636 Register t1,
3637 Register t2,
3638 Label& slow_case) {
3639 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3640 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
3641 }
3642
3643 RegSet MacroAssembler::call_clobbered_gp_registers() {
3644 RegSet regs;
3645 regs += RegSet::of(rax, rcx, rdx);
3646 #ifndef _WINDOWS
3647 regs += RegSet::of(rsi, rdi);
3648 #endif
3649 regs += RegSet::range(r8, r11);
3650 if (UseAPX) {
3651 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
3652 }
3653 return regs;
3654 }
3655
3656 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
3657 int num_xmm_registers = XMMRegister::available_xmm_registers();
3658 #if defined(_WINDOWS)
3659 XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
3660 if (num_xmm_registers > 16) {
3661 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
3662 }
3663 return result;
3664 #else
3665 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
3666 #endif
3667 }
3668
3669 // C1 only ever uses the first double/float of the XMM register.
3670 static int xmm_save_size() { return sizeof(double); }
3671
3672 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3673 masm->movdbl(Address(rsp, offset), reg);
3674 }
3675
3676 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3677 masm->movdbl(reg, Address(rsp, offset));
3678 }
3679
3680 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
3681 bool save_fpu, int& gp_area_size, int& xmm_area_size) {
3682
3683 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
3684 StackAlignmentInBytes);
3685 xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
3686
3687 return gp_area_size + xmm_area_size;
3688 }
3689
3690 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
3691 block_comment("push_call_clobbered_registers start");
3692 // Regular registers
3693 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
3694
3695 int gp_area_size;
3696 int xmm_area_size;
3697 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
3698 gp_area_size, xmm_area_size);
3699 subptr(rsp, total_save_size);
3700
3701 push_set(gp_registers_to_push, 0);
3702
3703 if (save_fpu) {
3704 push_set(call_clobbered_xmm_registers(), gp_area_size);
3705 }
3706
3707 block_comment("push_call_clobbered_registers end");
3708 }
3709
3710 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
3711 block_comment("pop_call_clobbered_registers start");
3712
3713 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
3714
3715 int gp_area_size;
3716 int xmm_area_size;
3717 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
3718 gp_area_size, xmm_area_size);
3719
3720 if (restore_fpu) {
3721 pop_set(call_clobbered_xmm_registers(), gp_area_size);
3722 }
3723
3724 pop_set(gp_registers_to_pop, 0);
3725
3726 addptr(rsp, total_save_size);
3727
3728 vzeroupper();
3729
3730 block_comment("pop_call_clobbered_registers end");
3731 }
3732
3733 void MacroAssembler::push_set(XMMRegSet set, int offset) {
3734 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
3735 int spill_offset = offset;
3736
3737 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
3738 save_xmm_register(this, spill_offset, *it);
3739 spill_offset += xmm_save_size();
3740 }
3741 }
3742
3743 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
3744 int restore_size = set.size() * xmm_save_size();
3745 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
3746
3747 int restore_offset = offset + restore_size - xmm_save_size();
3748
3749 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
3750 restore_xmm_register(this, restore_offset, *it);
3751 restore_offset -= xmm_save_size();
3752 }
3753 }
3754
3755 void MacroAssembler::push_set(RegSet set, int offset) {
3756 int spill_offset;
3757 if (offset == -1) {
3758 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3759 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
3760 subptr(rsp, aligned_size);
3761 spill_offset = 0;
3762 } else {
3763 spill_offset = offset;
3764 }
3765
3766 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
3767 movptr(Address(rsp, spill_offset), *it);
3768 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3769 }
3770 }
3771
3772 void MacroAssembler::pop_set(RegSet set, int offset) {
3773
3774 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3775 int restore_size = set.size() * gp_reg_size;
3776 int aligned_size = align_up(restore_size, StackAlignmentInBytes);
3777
3778 int restore_offset;
3779 if (offset == -1) {
3780 restore_offset = restore_size - gp_reg_size;
3781 } else {
3782 restore_offset = offset + restore_size - gp_reg_size;
3783 }
3784 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
3785 movptr(*it, Address(rsp, restore_offset));
3786 restore_offset -= gp_reg_size;
3787 }
3788
3789 if (offset == -1) {
3790 addptr(rsp, aligned_size);
3791 }
3792 }
3793
3794 // Preserves the contents of address, destroys the contents length_in_bytes and temp.
3795 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
3796 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
3797 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
3798 Label done;
3799
3800 testptr(length_in_bytes, length_in_bytes);
3801 jcc(Assembler::zero, done);
3802
3803 // initialize topmost word, divide index by 2, check if odd and test if zero
3804 // note: for the remaining code to work, index must be a multiple of BytesPerWord
3805 #ifdef ASSERT
3806 {
3807 Label L;
3808 testptr(length_in_bytes, BytesPerWord - 1);
3809 jcc(Assembler::zero, L);
3810 stop("length must be a multiple of BytesPerWord");
3811 bind(L);
3812 }
3813 #endif
3814 Register index = length_in_bytes;
3815 xorptr(temp, temp); // use _zero reg to clear memory (shorter code)
3816 if (UseIncDec) {
3817 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set
3818 } else {
3819 shrptr(index, 2); // use 2 instructions to avoid partial flag stall
3820 shrptr(index, 1);
3821 }
3822
3823 // initialize remaining object fields: index is a multiple of 2 now
3824 {
3825 Label loop;
3826 bind(loop);
3827 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
3828 decrement(index);
3829 jcc(Assembler::notZero, loop);
3830 }
3831
3832 bind(done);
3833 }
3834
3835 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
3836 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
3837 #ifdef ASSERT
3838 {
3839 Label done;
3840 cmpptr(layout_info, 0);
3841 jcc(Assembler::notEqual, done);
3842 stop("inline_layout_info_array is null");
3843 bind(done);
3844 }
3845 #endif
3846
3847 InlineLayoutInfo array[2];
3848 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
3849 if (is_power_of_2(size)) {
3850 shll(index, log2i_exact(size)); // Scale index by power of 2
3851 } else {
3852 imull(index, index, size); // Scale the index to be the entry index * array_element_size
3853 }
3854 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
3855 }
3856
3857 // Look up the method for a megamorphic invokeinterface call.
3858 // The target method is determined by <intf_klass, itable_index>.
3859 // The receiver klass is in recv_klass.
3860 // On success, the result will be in method_result, and execution falls through.
3861 // On failure, execution transfers to the given label.
3862 void MacroAssembler::lookup_interface_method(Register recv_klass,
3863 Register intf_klass,
3864 RegisterOrConstant itable_index,
3865 Register method_result,
3866 Register scan_temp,
3867 Label& L_no_such_interface,
3868 bool return_method) {
3869 assert_different_registers(recv_klass, intf_klass, scan_temp);
3870 assert_different_registers(method_result, intf_klass, scan_temp);
3871 assert(recv_klass != method_result || !return_method,
3872 "recv_klass can be destroyed when method isn't needed");
3873
3874 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
3875 "caller must use same register for non-constant itable index as for method");
3876
3877 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
3878 int vtable_base = in_bytes(Klass::vtable_start_offset());
3879 int itentry_off = in_bytes(itableMethodEntry::method_offset());
3880 int scan_step = itableOffsetEntry::size() * wordSize;
3881 int vte_size = vtableEntry::size_in_bytes();
3882 Address::ScaleFactor times_vte_scale = Address::times_ptr;
3883 assert(vte_size == wordSize, "else adjust times_vte_scale");
3884
3885 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
3886
3887 // Could store the aligned, prescaled offset in the klass.
3888 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
3889
3890 if (return_method) {
3891 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
3892 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
3893 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
3894 }
3895
3896 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
3897 // if (scan->interface() == intf) {
3898 // result = (klass + scan->offset() + itable_index);
3899 // }
3900 // }
3901 Label search, found_method;
3902
3903 for (int peel = 1; peel >= 0; peel--) {
3904 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
3905 cmpptr(intf_klass, method_result);
3906
3907 if (peel) {
3908 jccb(Assembler::equal, found_method);
3909 } else {
3910 jccb(Assembler::notEqual, search);
3911 // (invert the test to fall through to found_method...)
3912 }
3913
3914 if (!peel) break;
3915
3916 bind(search);
3917
3918 // Check that the previous entry is non-null. A null entry means that
3919 // the receiver class doesn't implement the interface, and wasn't the
3920 // same as when the caller was compiled.
3921 testptr(method_result, method_result);
3922 jcc(Assembler::zero, L_no_such_interface);
3923 addptr(scan_temp, scan_step);
3924 }
3925
3926 bind(found_method);
3927
3928 if (return_method) {
3929 // Got a hit.
3930 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
3931 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
3932 }
3933 }
3934
3935 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
3936 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
3937 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
3938 // The target method is determined by <holder_klass, itable_index>.
3939 // The receiver klass is in recv_klass.
3940 // On success, the result will be in method_result, and execution falls through.
3941 // On failure, execution transfers to the given label.
3942 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
3943 Register holder_klass,
3944 Register resolved_klass,
3945 Register method_result,
3946 Register scan_temp,
3947 Register temp_reg2,
3948 Register receiver,
3949 int itable_index,
3950 Label& L_no_such_interface) {
3951 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
3952 Register temp_itbl_klass = method_result;
3953 Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl
3954
3955 int vtable_base = in_bytes(Klass::vtable_start_offset());
3956 int itentry_off = in_bytes(itableMethodEntry::method_offset());
3957 int scan_step = itableOffsetEntry::size() * wordSize;
3958 int vte_size = vtableEntry::size_in_bytes();
3959 int ioffset = in_bytes(itableOffsetEntry::interface_offset());
3960 int ooffset = in_bytes(itableOffsetEntry::offset_offset());
3961 Address::ScaleFactor times_vte_scale = Address::times_ptr;
3962 assert(vte_size == wordSize, "adjust times_vte_scale");
3963
3964 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
3965
3966 // temp_itbl_klass = recv_klass.itable[0]
3967 // scan_temp = &recv_klass.itable[0] + step
3968 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
3969 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
3970 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
3971 xorptr(temp_reg, temp_reg);
3972
3973 // Initial checks:
3974 // - if (holder_klass != resolved_klass), go to "scan for resolved"
3975 // - if (itable[0] == 0), no such interface
3976 // - if (itable[0] == holder_klass), shortcut to "holder found"
3977 cmpptr(holder_klass, resolved_klass);
3978 jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
3979 testptr(temp_itbl_klass, temp_itbl_klass);
3980 jccb(Assembler::zero, L_no_such_interface);
3981 cmpptr(holder_klass, temp_itbl_klass);
3982 jccb(Assembler::equal, L_holder_found);
3983
3984 // Loop: Look for holder_klass record in itable
3985 // do {
3986 // tmp = itable[index];
3987 // index += step;
3988 // if (tmp == holder_klass) {
3989 // goto L_holder_found; // Found!
3990 // }
3991 // } while (tmp != 0);
3992 // goto L_no_such_interface // Not found.
3993 Label L_scan_holder;
3994 bind(L_scan_holder);
3995 movptr(temp_itbl_klass, Address(scan_temp, 0));
3996 addptr(scan_temp, scan_step);
3997 cmpptr(holder_klass, temp_itbl_klass);
3998 jccb(Assembler::equal, L_holder_found);
3999 testptr(temp_itbl_klass, temp_itbl_klass);
4000 jccb(Assembler::notZero, L_scan_holder);
4001
4002 jmpb(L_no_such_interface);
4003
4004 // Loop: Look for resolved_class record in itable
4005 // do {
4006 // tmp = itable[index];
4007 // index += step;
4008 // if (tmp == holder_klass) {
4009 // // Also check if we have met a holder klass
4010 // holder_tmp = itable[index-step-ioffset];
4011 // }
4012 // if (tmp == resolved_klass) {
4013 // goto L_resolved_found; // Found!
4014 // }
4015 // } while (tmp != 0);
4016 // goto L_no_such_interface // Not found.
4017 //
4018 Label L_loop_scan_resolved;
4019 bind(L_loop_scan_resolved);
4020 movptr(temp_itbl_klass, Address(scan_temp, 0));
4021 addptr(scan_temp, scan_step);
4022 bind(L_loop_scan_resolved_entry);
4023 cmpptr(holder_klass, temp_itbl_klass);
4024 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4025 cmpptr(resolved_klass, temp_itbl_klass);
4026 jccb(Assembler::equal, L_resolved_found);
4027 testptr(temp_itbl_klass, temp_itbl_klass);
4028 jccb(Assembler::notZero, L_loop_scan_resolved);
4029
4030 jmpb(L_no_such_interface);
4031
4032 Label L_ready;
4033
4034 // See if we already have a holder klass. If not, go and scan for it.
4035 bind(L_resolved_found);
4036 testptr(temp_reg, temp_reg);
4037 jccb(Assembler::zero, L_scan_holder);
4038 jmpb(L_ready);
4039
4040 bind(L_holder_found);
4041 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4042
4043 // Finally, temp_reg contains holder_klass vtable offset
4044 bind(L_ready);
4045 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4046 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl
4047 load_klass(scan_temp, receiver, noreg);
4048 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4049 } else {
4050 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4051 }
4052 }
4053
4054
4055 // virtual method calling
4056 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4057 RegisterOrConstant vtable_index,
4058 Register method_result) {
4059 const ByteSize base = Klass::vtable_start_offset();
4060 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4061 Address vtable_entry_addr(recv_klass,
4062 vtable_index, Address::times_ptr,
4063 base + vtableEntry::method_offset());
4064 movptr(method_result, vtable_entry_addr);
4065 }
4066
4067
4068 void MacroAssembler::check_klass_subtype(Register sub_klass,
4069 Register super_klass,
4070 Register temp_reg,
4071 Label& L_success) {
4072 Label L_failure;
4073 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
4074 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
4075 bind(L_failure);
4076 }
4077
4078
4079 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4080 Register super_klass,
4081 Register temp_reg,
4082 Label* L_success,
4083 Label* L_failure,
4084 Label* L_slow_path,
4085 RegisterOrConstant super_check_offset) {
4086 assert_different_registers(sub_klass, super_klass, temp_reg);
4087 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4088 if (super_check_offset.is_register()) {
4089 assert_different_registers(sub_klass, super_klass,
4090 super_check_offset.as_register());
4091 } else if (must_load_sco) {
4092 assert(temp_reg != noreg, "supply either a temp or a register offset");
4093 }
4094
4095 Label L_fallthrough;
4096 int label_nulls = 0;
4097 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4098 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4099 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
4100 assert(label_nulls <= 1, "at most one null in the batch");
4101
4102 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4103 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4104 Address super_check_offset_addr(super_klass, sco_offset);
4105
4106 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4107 // range of a jccb. If this routine grows larger, reconsider at
4108 // least some of these.
4109 #define local_jcc(assembler_cond, label) \
4110 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4111 else jcc( assembler_cond, label) /*omit semi*/
4112
4113 // Hacked jmp, which may only be used just before L_fallthrough.
4114 #define final_jmp(label) \
4115 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4116 else jmp(label) /*omit semi*/
4117
4118 // If the pointers are equal, we are done (e.g., String[] elements).
4119 // This self-check enables sharing of secondary supertype arrays among
4120 // non-primary types such as array-of-interface. Otherwise, each such
4121 // type would need its own customized SSA.
4122 // We move this check to the front of the fast path because many
4123 // type checks are in fact trivially successful in this manner,
4124 // so we get a nicely predicted branch right at the start of the check.
4125 cmpptr(sub_klass, super_klass);
4126 local_jcc(Assembler::equal, *L_success);
4127
4128 // Check the supertype display:
4129 if (must_load_sco) {
4130 // Positive movl does right thing on LP64.
4131 movl(temp_reg, super_check_offset_addr);
4132 super_check_offset = RegisterOrConstant(temp_reg);
4133 }
4134 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4135 cmpptr(super_klass, super_check_addr); // load displayed supertype
4136
4137 // This check has worked decisively for primary supers.
4138 // Secondary supers are sought in the super_cache ('super_cache_addr').
4139 // (Secondary supers are interfaces and very deeply nested subtypes.)
4140 // This works in the same check above because of a tricky aliasing
4141 // between the super_cache and the primary super display elements.
4142 // (The 'super_check_addr' can address either, as the case requires.)
4143 // Note that the cache is updated below if it does not help us find
4144 // what we need immediately.
4145 // So if it was a primary super, we can just fail immediately.
4146 // Otherwise, it's the slow path for us (no success at this point).
4147
4148 if (super_check_offset.is_register()) {
4149 local_jcc(Assembler::equal, *L_success);
4150 cmpl(super_check_offset.as_register(), sc_offset);
4151 if (L_failure == &L_fallthrough) {
4152 local_jcc(Assembler::equal, *L_slow_path);
4153 } else {
4154 local_jcc(Assembler::notEqual, *L_failure);
4155 final_jmp(*L_slow_path);
4156 }
4157 } else if (super_check_offset.as_constant() == sc_offset) {
4158 // Need a slow path; fast failure is impossible.
4159 if (L_slow_path == &L_fallthrough) {
4160 local_jcc(Assembler::equal, *L_success);
4161 } else {
4162 local_jcc(Assembler::notEqual, *L_slow_path);
4163 final_jmp(*L_success);
4164 }
4165 } else {
4166 // No slow path; it's a fast decision.
4167 if (L_failure == &L_fallthrough) {
4168 local_jcc(Assembler::equal, *L_success);
4169 } else {
4170 local_jcc(Assembler::notEqual, *L_failure);
4171 final_jmp(*L_success);
4172 }
4173 }
4174
4175 bind(L_fallthrough);
4176
4177 #undef local_jcc
4178 #undef final_jmp
4179 }
4180
4181
4182 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
4183 Register super_klass,
4184 Register temp_reg,
4185 Register temp2_reg,
4186 Label* L_success,
4187 Label* L_failure,
4188 bool set_cond_codes) {
4189 assert_different_registers(sub_klass, super_klass, temp_reg);
4190 if (temp2_reg != noreg)
4191 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
4192 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
4193
4194 Label L_fallthrough;
4195 int label_nulls = 0;
4196 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4197 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4198 assert(label_nulls <= 1, "at most one null in the batch");
4199
4200 // a couple of useful fields in sub_klass:
4201 int ss_offset = in_bytes(Klass::secondary_supers_offset());
4202 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4203 Address secondary_supers_addr(sub_klass, ss_offset);
4204 Address super_cache_addr( sub_klass, sc_offset);
4205
4206 // Do a linear scan of the secondary super-klass chain.
4207 // This code is rarely used, so simplicity is a virtue here.
4208 // The repne_scan instruction uses fixed registers, which we must spill.
4209 // Don't worry too much about pre-existing connections with the input regs.
4210
4211 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
4212 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
4213
4214 // Get super_klass value into rax (even if it was in rdi or rcx).
4215 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
4216 if (super_klass != rax) {
4217 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
4218 mov(rax, super_klass);
4219 }
4220 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
4221 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
4222
4223 #ifndef PRODUCT
4224 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
4225 ExternalAddress pst_counter_addr((address) pst_counter);
4226 lea(rcx, pst_counter_addr);
4227 incrementl(Address(rcx, 0));
4228 #endif //PRODUCT
4229
4230 // We will consult the secondary-super array.
4231 movptr(rdi, secondary_supers_addr);
4232 // Load the array length. (Positive movl does right thing on LP64.)
4233 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
4234 // Skip to start of data.
4235 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
4236
4237 // Scan RCX words at [RDI] for an occurrence of RAX.
4238 // Set NZ/Z based on last compare.
4239 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
4240 // not change flags (only scas instruction which is repeated sets flags).
4241 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
4242
4243 testptr(rax,rax); // Set Z = 0
4244 repne_scan();
4245
4246 // Unspill the temp. registers:
4247 if (pushed_rdi) pop(rdi);
4248 if (pushed_rcx) pop(rcx);
4249 if (pushed_rax) pop(rax);
4250
4251 if (set_cond_codes) {
4252 // Special hack for the AD files: rdi is guaranteed non-zero.
4253 assert(!pushed_rdi, "rdi must be left non-null");
4254 // Also, the condition codes are properly set Z/NZ on succeed/failure.
4255 }
4256
4257 if (L_failure == &L_fallthrough)
4258 jccb(Assembler::notEqual, *L_failure);
4259 else jcc(Assembler::notEqual, *L_failure);
4260
4261 // Success. Cache the super we found and proceed in triumph.
4262 movptr(super_cache_addr, super_klass);
4263
4264 if (L_success != &L_fallthrough) {
4265 jmp(*L_success);
4266 }
4267
4268 #undef IS_A_TEMP
4269
4270 bind(L_fallthrough);
4271 }
4272
4273 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4274 Register super_klass,
4275 Register temp_reg,
4276 Register temp2_reg,
4277 Label* L_success,
4278 Label* L_failure,
4279 bool set_cond_codes) {
4280 assert(set_cond_codes == false, "must be false on 64-bit x86");
4281 check_klass_subtype_slow_path
4282 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
4283 L_success, L_failure);
4284 }
4285
4286 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4287 Register super_klass,
4288 Register temp_reg,
4289 Register temp2_reg,
4290 Register temp3_reg,
4291 Register temp4_reg,
4292 Label* L_success,
4293 Label* L_failure) {
4294 if (UseSecondarySupersTable) {
4295 check_klass_subtype_slow_path_table
4296 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
4297 L_success, L_failure);
4298 } else {
4299 check_klass_subtype_slow_path_linear
4300 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
4301 }
4302 }
4303
4304 Register MacroAssembler::allocate_if_noreg(Register r,
4305 RegSetIterator<Register> &available_regs,
4306 RegSet ®s_to_push) {
4307 if (!r->is_valid()) {
4308 r = *available_regs++;
4309 regs_to_push += r;
4310 }
4311 return r;
4312 }
4313
4314 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
4315 Register super_klass,
4316 Register temp_reg,
4317 Register temp2_reg,
4318 Register temp3_reg,
4319 Register result_reg,
4320 Label* L_success,
4321 Label* L_failure) {
4322 // NB! Callers may assume that, when temp2_reg is a valid register,
4323 // this code sets it to a nonzero value.
4324 bool temp2_reg_was_valid = temp2_reg->is_valid();
4325
4326 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
4327
4328 Label L_fallthrough;
4329 int label_nulls = 0;
4330 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4331 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4332 assert(label_nulls <= 1, "at most one null in the batch");
4333
4334 BLOCK_COMMENT("check_klass_subtype_slow_path_table");
4335
4336 RegSetIterator<Register> available_regs
4337 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
4338
4339 RegSet pushed_regs;
4340
4341 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
4342 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
4343 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
4344 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
4345 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
4346
4347 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
4348
4349 {
4350
4351 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
4352 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
4353 subptr(rsp, aligned_size);
4354 push_set(pushed_regs, 0);
4355
4356 lookup_secondary_supers_table_var(sub_klass,
4357 super_klass,
4358 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
4359 cmpq(result_reg, 0);
4360
4361 // Unspill the temp. registers:
4362 pop_set(pushed_regs, 0);
4363 // Increment SP but do not clobber flags.
4364 lea(rsp, Address(rsp, aligned_size));
4365 }
4366
4367 if (temp2_reg_was_valid) {
4368 movq(temp2_reg, 1);
4369 }
4370
4371 jcc(Assembler::notEqual, *L_failure);
4372
4373 if (L_success != &L_fallthrough) {
4374 jmp(*L_success);
4375 }
4376
4377 bind(L_fallthrough);
4378 }
4379
4380 // population_count variant for running without the POPCNT
4381 // instruction, which was introduced with SSE4.2 in 2008.
4382 void MacroAssembler::population_count(Register dst, Register src,
4383 Register scratch1, Register scratch2) {
4384 assert_different_registers(src, scratch1, scratch2);
4385 if (UsePopCountInstruction) {
4386 Assembler::popcntq(dst, src);
4387 } else {
4388 assert_different_registers(src, scratch1, scratch2);
4389 assert_different_registers(dst, scratch1, scratch2);
4390 Label loop, done;
4391
4392 mov(scratch1, src);
4393 // dst = 0;
4394 // while(scratch1 != 0) {
4395 // dst++;
4396 // scratch1 &= (scratch1 - 1);
4397 // }
4398 xorl(dst, dst);
4399 testq(scratch1, scratch1);
4400 jccb(Assembler::equal, done);
4401 {
4402 bind(loop);
4403 incq(dst);
4404 movq(scratch2, scratch1);
4405 decq(scratch2);
4406 andq(scratch1, scratch2);
4407 jccb(Assembler::notEqual, loop);
4408 }
4409 bind(done);
4410 }
4411 #ifdef ASSERT
4412 mov64(scratch1, 0xCafeBabeDeadBeef);
4413 movq(scratch2, scratch1);
4414 #endif
4415 }
4416
4417 // Ensure that the inline code and the stub are using the same registers.
4418 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
4419 do { \
4420 assert(r_super_klass == rax, "mismatch"); \
4421 assert(r_array_base == rbx, "mismatch"); \
4422 assert(r_array_length == rcx, "mismatch"); \
4423 assert(r_array_index == rdx, "mismatch"); \
4424 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \
4425 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \
4426 assert(result == rdi || result == noreg, "mismatch"); \
4427 } while(0)
4428
4429 // Versions of salq and rorq that don't need count to be in rcx
4430
4431 void MacroAssembler::salq(Register dest, Register count) {
4432 if (count == rcx) {
4433 Assembler::salq(dest);
4434 } else {
4435 assert_different_registers(rcx, dest);
4436 xchgq(rcx, count);
4437 Assembler::salq(dest);
4438 xchgq(rcx, count);
4439 }
4440 }
4441
4442 void MacroAssembler::rorq(Register dest, Register count) {
4443 if (count == rcx) {
4444 Assembler::rorq(dest);
4445 } else {
4446 assert_different_registers(rcx, dest);
4447 xchgq(rcx, count);
4448 Assembler::rorq(dest);
4449 xchgq(rcx, count);
4450 }
4451 }
4452
4453 // Return true: we succeeded in generating this code
4454 //
4455 // At runtime, return 0 in result if r_super_klass is a superclass of
4456 // r_sub_klass, otherwise return nonzero. Use this if you know the
4457 // super_klass_slot of the class you're looking for. This is always
4458 // the case for instanceof and checkcast.
4459 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
4460 Register r_super_klass,
4461 Register temp1,
4462 Register temp2,
4463 Register temp3,
4464 Register temp4,
4465 Register result,
4466 u1 super_klass_slot) {
4467 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4468
4469 Label L_fallthrough, L_success, L_failure;
4470
4471 BLOCK_COMMENT("lookup_secondary_supers_table {");
4472
4473 const Register
4474 r_array_index = temp1,
4475 r_array_length = temp2,
4476 r_array_base = temp3,
4477 r_bitmap = temp4;
4478
4479 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
4480
4481 xorq(result, result); // = 0
4482
4483 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4484 movq(r_array_index, r_bitmap);
4485
4486 // First check the bitmap to see if super_klass might be present. If
4487 // the bit is zero, we are certain that super_klass is not one of
4488 // the secondary supers.
4489 u1 bit = super_klass_slot;
4490 {
4491 // NB: If the count in a x86 shift instruction is 0, the flags are
4492 // not affected, so we do a testq instead.
4493 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
4494 if (shift_count != 0) {
4495 salq(r_array_index, shift_count);
4496 } else {
4497 testq(r_array_index, r_array_index);
4498 }
4499 }
4500 // We test the MSB of r_array_index, i.e. its sign bit
4501 jcc(Assembler::positive, L_failure);
4502
4503 // Get the first array index that can contain super_klass into r_array_index.
4504 if (bit != 0) {
4505 population_count(r_array_index, r_array_index, temp2, temp3);
4506 } else {
4507 movl(r_array_index, 1);
4508 }
4509 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4510
4511 // We will consult the secondary-super array.
4512 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4513
4514 // We're asserting that the first word in an Array<Klass*> is the
4515 // length, and the second word is the first word of the data. If
4516 // that ever changes, r_array_base will have to be adjusted here.
4517 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4518 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4519
4520 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4521 jccb(Assembler::equal, L_success);
4522
4523 // Is there another entry to check? Consult the bitmap.
4524 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
4525 jccb(Assembler::carryClear, L_failure);
4526
4527 // Linear probe. Rotate the bitmap so that the next bit to test is
4528 // in Bit 1.
4529 if (bit != 0) {
4530 rorq(r_bitmap, bit);
4531 }
4532
4533 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4534 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4535 // Kills: r_array_length.
4536 // Returns: result.
4537 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
4538 // Result (0/1) is in rdi
4539 jmpb(L_fallthrough);
4540
4541 bind(L_failure);
4542 incq(result); // 0 => 1
4543
4544 bind(L_success);
4545 // result = 0;
4546
4547 bind(L_fallthrough);
4548 BLOCK_COMMENT("} lookup_secondary_supers_table");
4549
4550 if (VerifySecondarySupers) {
4551 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4552 temp1, temp2, temp3);
4553 }
4554 }
4555
4556 // At runtime, return 0 in result if r_super_klass is a superclass of
4557 // r_sub_klass, otherwise return nonzero. Use this version of
4558 // lookup_secondary_supers_table() if you don't know ahead of time
4559 // which superclass will be searched for. Used by interpreter and
4560 // runtime stubs. It is larger and has somewhat greater latency than
4561 // the version above, which takes a constant super_klass_slot.
4562 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
4563 Register r_super_klass,
4564 Register temp1,
4565 Register temp2,
4566 Register temp3,
4567 Register temp4,
4568 Register result) {
4569 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4570 assert_different_registers(r_sub_klass, r_super_klass, rcx);
4571 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
4572
4573 Label L_fallthrough, L_success, L_failure;
4574
4575 BLOCK_COMMENT("lookup_secondary_supers_table {");
4576
4577 RegSetIterator<Register> available_regs = (temps - rcx).begin();
4578
4579 // FIXME. Once we are sure that all paths reaching this point really
4580 // do pass rcx as one of our temps we can get rid of the following
4581 // workaround.
4582 assert(temps.contains(rcx), "fix this code");
4583
4584 // We prefer to have our shift count in rcx. If rcx is one of our
4585 // temps, use it for slot. If not, pick any of our temps.
4586 Register slot;
4587 if (!temps.contains(rcx)) {
4588 slot = *available_regs++;
4589 } else {
4590 slot = rcx;
4591 }
4592
4593 const Register r_array_index = *available_regs++;
4594 const Register r_bitmap = *available_regs++;
4595
4596 // The logic above guarantees this property, but we state it here.
4597 assert_different_registers(r_array_index, r_bitmap, rcx);
4598
4599 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4600 movq(r_array_index, r_bitmap);
4601
4602 // First check the bitmap to see if super_klass might be present. If
4603 // the bit is zero, we are certain that super_klass is not one of
4604 // the secondary supers.
4605 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4606 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
4607 salq(r_array_index, slot);
4608
4609 testq(r_array_index, r_array_index);
4610 // We test the MSB of r_array_index, i.e. its sign bit
4611 jcc(Assembler::positive, L_failure);
4612
4613 const Register r_array_base = *available_regs++;
4614
4615 // Get the first array index that can contain super_klass into r_array_index.
4616 // Note: Clobbers r_array_base and slot.
4617 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
4618
4619 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4620
4621 // We will consult the secondary-super array.
4622 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4623
4624 // We're asserting that the first word in an Array<Klass*> is the
4625 // length, and the second word is the first word of the data. If
4626 // that ever changes, r_array_base will have to be adjusted here.
4627 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4628 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4629
4630 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4631 jccb(Assembler::equal, L_success);
4632
4633 // Restore slot to its true value
4634 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4635
4636 // Linear probe. Rotate the bitmap so that the next bit to test is
4637 // in Bit 1.
4638 rorq(r_bitmap, slot);
4639
4640 // Is there another entry to check? Consult the bitmap.
4641 btq(r_bitmap, 1);
4642 jccb(Assembler::carryClear, L_failure);
4643
4644 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4645 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4646 // Kills: r_array_length.
4647 // Returns: result.
4648 lookup_secondary_supers_table_slow_path(r_super_klass,
4649 r_array_base,
4650 r_array_index,
4651 r_bitmap,
4652 /*temp1*/result,
4653 /*temp2*/slot,
4654 &L_success,
4655 nullptr);
4656
4657 bind(L_failure);
4658 movq(result, 1);
4659 jmpb(L_fallthrough);
4660
4661 bind(L_success);
4662 xorq(result, result); // = 0
4663
4664 bind(L_fallthrough);
4665 BLOCK_COMMENT("} lookup_secondary_supers_table");
4666
4667 if (VerifySecondarySupers) {
4668 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4669 temp1, temp2, temp3);
4670 }
4671 }
4672
4673 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
4674 Label* L_success, Label* L_failure) {
4675 Label L_loop, L_fallthrough;
4676 {
4677 int label_nulls = 0;
4678 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4679 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4680 assert(label_nulls <= 1, "at most one null in the batch");
4681 }
4682 bind(L_loop);
4683 cmpq(value, Address(addr, count, Address::times_8));
4684 jcc(Assembler::equal, *L_success);
4685 addl(count, 1);
4686 cmpl(count, limit);
4687 jcc(Assembler::less, L_loop);
4688
4689 if (&L_fallthrough != L_failure) {
4690 jmp(*L_failure);
4691 }
4692 bind(L_fallthrough);
4693 }
4694
4695 // Called by code generated by check_klass_subtype_slow_path
4696 // above. This is called when there is a collision in the hashed
4697 // lookup in the secondary supers array.
4698 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
4699 Register r_array_base,
4700 Register r_array_index,
4701 Register r_bitmap,
4702 Register temp1,
4703 Register temp2,
4704 Label* L_success,
4705 Label* L_failure) {
4706 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
4707
4708 const Register
4709 r_array_length = temp1,
4710 r_sub_klass = noreg,
4711 result = noreg;
4712
4713 Label L_fallthrough;
4714 int label_nulls = 0;
4715 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4716 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4717 assert(label_nulls <= 1, "at most one null in the batch");
4718
4719 // Load the array length.
4720 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4721 // And adjust the array base to point to the data.
4722 // NB! Effectively increments current slot index by 1.
4723 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
4724 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4725
4726 // Linear probe
4727 Label L_huge;
4728
4729 // The bitmap is full to bursting.
4730 // Implicit invariant: BITMAP_FULL implies (length > 0)
4731 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
4732 jcc(Assembler::greater, L_huge);
4733
4734 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
4735 // current slot (at secondary_supers[r_array_index]) has not yet
4736 // been inspected, and r_array_index may be out of bounds if we
4737 // wrapped around the end of the array.
4738
4739 { // This is conventional linear probing, but instead of terminating
4740 // when a null entry is found in the table, we maintain a bitmap
4741 // in which a 0 indicates missing entries.
4742 // The check above guarantees there are 0s in the bitmap, so the loop
4743 // eventually terminates.
4744
4745 xorl(temp2, temp2); // = 0;
4746
4747 Label L_again;
4748 bind(L_again);
4749
4750 // Check for array wraparound.
4751 cmpl(r_array_index, r_array_length);
4752 cmovl(Assembler::greaterEqual, r_array_index, temp2);
4753
4754 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4755 jcc(Assembler::equal, *L_success);
4756
4757 // If the next bit in bitmap is zero, we're done.
4758 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
4759 jcc(Assembler::carryClear, *L_failure);
4760
4761 rorq(r_bitmap, 1); // Bits 1/2 => 0/1
4762 addl(r_array_index, 1);
4763
4764 jmp(L_again);
4765 }
4766
4767 { // Degenerate case: more than 64 secondary supers.
4768 // FIXME: We could do something smarter here, maybe a vectorized
4769 // comparison or a binary search, but is that worth any added
4770 // complexity?
4771 bind(L_huge);
4772 xorl(r_array_index, r_array_index); // = 0
4773 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
4774 L_success,
4775 (&L_fallthrough != L_failure ? L_failure : nullptr));
4776
4777 bind(L_fallthrough);
4778 }
4779 }
4780
4781 struct VerifyHelperArguments {
4782 Klass* _super;
4783 Klass* _sub;
4784 intptr_t _linear_result;
4785 intptr_t _table_result;
4786 };
4787
4788 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
4789 Klass::on_secondary_supers_verification_failure(args->_super,
4790 args->_sub,
4791 args->_linear_result,
4792 args->_table_result,
4793 msg);
4794 }
4795
4796 // Make sure that the hashed lookup and a linear scan agree.
4797 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
4798 Register r_super_klass,
4799 Register result,
4800 Register temp1,
4801 Register temp2,
4802 Register temp3) {
4803 const Register
4804 r_array_index = temp1,
4805 r_array_length = temp2,
4806 r_array_base = temp3,
4807 r_bitmap = noreg;
4808
4809 BLOCK_COMMENT("verify_secondary_supers_table {");
4810
4811 Label L_success, L_failure, L_check, L_done;
4812
4813 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4814 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4815 // And adjust the array base to point to the data.
4816 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4817
4818 testl(r_array_length, r_array_length); // array_length == 0?
4819 jcc(Assembler::zero, L_failure);
4820
4821 movl(r_array_index, 0);
4822 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
4823 // fall through to L_failure
4824
4825 const Register linear_result = r_array_index; // reuse temp1
4826
4827 bind(L_failure); // not present
4828 movl(linear_result, 1);
4829 jmp(L_check);
4830
4831 bind(L_success); // present
4832 movl(linear_result, 0);
4833
4834 bind(L_check);
4835 cmpl(linear_result, result);
4836 jcc(Assembler::equal, L_done);
4837
4838 { // To avoid calling convention issues, build a record on the stack
4839 // and pass the pointer to that instead.
4840 push(result);
4841 push(linear_result);
4842 push(r_sub_klass);
4843 push(r_super_klass);
4844 movptr(c_rarg1, rsp);
4845 movptr(c_rarg0, (uintptr_t) "mismatch");
4846 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
4847 should_not_reach_here();
4848 }
4849 bind(L_done);
4850
4851 BLOCK_COMMENT("} verify_secondary_supers_table");
4852 }
4853
4854 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
4855
4856 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
4857 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
4858
4859 Label L_fallthrough;
4860 if (L_fast_path == nullptr) {
4861 L_fast_path = &L_fallthrough;
4862 } else if (L_slow_path == nullptr) {
4863 L_slow_path = &L_fallthrough;
4864 }
4865
4866 // Fast path check: class is fully initialized.
4867 // init_state needs acquire, but x86 is TSO, and so we are already good.
4868 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4869 jcc(Assembler::equal, *L_fast_path);
4870
4871 // Fast path check: current thread is initializer thread
4872 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
4873 if (L_slow_path == &L_fallthrough) {
4874 jcc(Assembler::equal, *L_fast_path);
4875 bind(*L_slow_path);
4876 } else if (L_fast_path == &L_fallthrough) {
4877 jcc(Assembler::notEqual, *L_slow_path);
4878 bind(*L_fast_path);
4879 } else {
4880 Unimplemented();
4881 }
4882 }
4883
4884 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
4885 if (VM_Version::supports_cmov()) {
4886 cmovl(cc, dst, src);
4887 } else {
4888 Label L;
4889 jccb(negate_condition(cc), L);
4890 movl(dst, src);
4891 bind(L);
4892 }
4893 }
4894
4895 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
4896 if (VM_Version::supports_cmov()) {
4897 cmovl(cc, dst, src);
4898 } else {
4899 Label L;
4900 jccb(negate_condition(cc), L);
4901 movl(dst, src);
4902 bind(L);
4903 }
4904 }
4905
4906 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
4907 if (!VerifyOops || VerifyAdapterSharing) {
4908 // Below address of the code string confuses VerifyAdapterSharing
4909 // because it may differ between otherwise equivalent adapters.
4910 return;
4911 }
4912
4913 BLOCK_COMMENT("verify_oop {");
4914 push(rscratch1);
4915 push(rax); // save rax
4916 push(reg); // pass register argument
4917
4918 // Pass register number to verify_oop_subroutine
4919 const char* b = nullptr;
4920 {
4921 ResourceMark rm;
4922 stringStream ss;
4923 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
4924 b = code_string(ss.as_string());
4925 }
4926 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
4927 pushptr(buffer.addr(), rscratch1);
4928
4929 // call indirectly to solve generation ordering problem
4930 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
4931 call(rax);
4932 // Caller pops the arguments (oop, message) and restores rax, r10
4933 BLOCK_COMMENT("} verify_oop");
4934 }
4935
4936 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
4937 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
4938 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without
4939 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog
4940 vpternlogd(dst, 0xFF, dst, dst, vector_len);
4941 } else if (VM_Version::supports_avx()) {
4942 vpcmpeqd(dst, dst, dst, vector_len);
4943 } else {
4944 pcmpeqd(dst, dst);
4945 }
4946 }
4947
4948 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
4949 int extra_slot_offset) {
4950 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
4951 int stackElementSize = Interpreter::stackElementSize;
4952 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
4953 #ifdef ASSERT
4954 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
4955 assert(offset1 - offset == stackElementSize, "correct arithmetic");
4956 #endif
4957 Register scale_reg = noreg;
4958 Address::ScaleFactor scale_factor = Address::no_scale;
4959 if (arg_slot.is_constant()) {
4960 offset += arg_slot.as_constant() * stackElementSize;
4961 } else {
4962 scale_reg = arg_slot.as_register();
4963 scale_factor = Address::times(stackElementSize);
4964 }
4965 offset += wordSize; // return PC is on stack
4966 return Address(rsp, scale_reg, scale_factor, offset);
4967 }
4968
4969 // Handle the receiver type profile update given the "recv" klass.
4970 //
4971 // Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
4972 // If there are no matching or claimable receiver entries in RD, updates
4973 // the polymorphic counter.
4974 //
4975 // This code expected to run by either the interpreter or JIT-ed code, without
4976 // extra synchronization. For safety, receiver cells are claimed atomically, which
4977 // avoids grossly misrepresenting the profiles under concurrent updates. For speed,
4978 // counter updates are not atomic.
4979 //
4980 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
4981 int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
4982 int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
4983 int poly_count_offset = in_bytes(CounterData::count_offset());
4984 int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
4985 int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
4986
4987 // Adjust for MDP offsets. Slots are pointer-sized, so is the global offset.
4988 assert(is_aligned(mdp_offset, BytesPerWord), "sanity");
4989 base_receiver_offset += mdp_offset;
4990 end_receiver_offset += mdp_offset;
4991 poly_count_offset += mdp_offset;
4992
4993 // Scale down to optimize encoding. Slots are pointer-sized.
4994 assert(is_aligned(base_receiver_offset, BytesPerWord), "sanity");
4995 assert(is_aligned(end_receiver_offset, BytesPerWord), "sanity");
4996 assert(is_aligned(poly_count_offset, BytesPerWord), "sanity");
4997 assert(is_aligned(receiver_step, BytesPerWord), "sanity");
4998 assert(is_aligned(receiver_to_count_step, BytesPerWord), "sanity");
4999 base_receiver_offset >>= LogBytesPerWord;
5000 end_receiver_offset >>= LogBytesPerWord;
5001 poly_count_offset >>= LogBytesPerWord;
5002 receiver_step >>= LogBytesPerWord;
5003 receiver_to_count_step >>= LogBytesPerWord;
5004
5005 #ifdef ASSERT
5006 // We are about to walk the MDO slots without asking for offsets.
5007 // Check that our math hits all the right spots.
5008 for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
5009 int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
5010 int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
5011 int offset = base_receiver_offset + receiver_step*c;
5012 int count_offset = offset + receiver_to_count_step;
5013 assert((offset << LogBytesPerWord) == real_recv_offset, "receiver slot math");
5014 assert((count_offset << LogBytesPerWord) == real_count_offset, "receiver count math");
5015 }
5016 int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
5017 assert(poly_count_offset << LogBytesPerWord == real_poly_count_offset, "poly counter math");
5018 #endif
5019
5020 // Corner case: no profile table. Increment poly counter and exit.
5021 if (ReceiverTypeData::row_limit() == 0) {
5022 addptr(Address(mdp, poly_count_offset, Address::times_ptr), DataLayout::counter_increment);
5023 return;
5024 }
5025
5026 Register offset = rscratch1;
5027
5028 Label L_loop_search_receiver, L_loop_search_empty;
5029 Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
5030
5031 // The code here recognizes three major cases:
5032 // A. Fastest: receiver found in the table
5033 // B. Fast: no receiver in the table, and the table is full
5034 // C. Slow: no receiver in the table, free slots in the table
5035 //
5036 // The case A performance is most important, as perfectly-behaved code would end up
5037 // there, especially with larger TypeProfileWidth. The case B performance is
5038 // important as well, this is where bulk of code would land for normally megamorphic
5039 // cases. The case C performance is not essential, its job is to deal with installation
5040 // races, we optimize for code density instead. Case C needs to make sure that receiver
5041 // rows are only claimed once. This makes sure we never overwrite a row for another
5042 // receiver and never duplicate the receivers in the list, making profile type-accurate.
5043 //
5044 // It is very tempting to handle these cases in a single loop, and claim the first slot
5045 // without checking the rest of the table. But, profiling code should tolerate free slots
5046 // in the table, as class unloading can clear them. After such cleanup, the receiver
5047 // we need might be _after_ the free slot. Therefore, we need to let at least full scan
5048 // to complete, before trying to install new slots. Splitting the code in several tight
5049 // loops also helpfully optimizes for cases A and B.
5050 //
5051 // This code is effectively:
5052 //
5053 // restart:
5054 // // Fastest: receiver is already installed
5055 // for (i = 0; i < receiver_count(); i++) {
5056 // if (receiver(i) == recv) goto found_recv(i);
5057 // }
5058 //
5059 // // Fast: no receiver, but profile is full
5060 // for (i = 0; i < receiver_count(); i++) {
5061 // if (receiver(i) == null) goto found_null(i);
5062 // }
5063 // goto polymorphic
5064 //
5065 // // Slow: try to install receiver
5066 // found_null(i):
5067 // CAS(&receiver(i), null, recv);
5068 // goto restart
5069 //
5070 // polymorphic:
5071 // count++;
5072 // return
5073 //
5074 // found_recv(i):
5075 // *receiver_count(i)++
5076 //
5077
5078 bind(L_restart);
5079
5080 // Fastest: receiver is already installed
5081 movptr(offset, base_receiver_offset);
5082 bind(L_loop_search_receiver);
5083 cmpptr(recv, Address(mdp, offset, Address::times_ptr));
5084 jccb(Assembler::equal, L_found_recv);
5085 addptr(offset, receiver_step);
5086 cmpptr(offset, end_receiver_offset);
5087 jccb(Assembler::notEqual, L_loop_search_receiver);
5088
5089 // Fast: no receiver, but profile is full
5090 movptr(offset, base_receiver_offset);
5091 bind(L_loop_search_empty);
5092 cmpptr(Address(mdp, offset, Address::times_ptr), NULL_WORD);
5093 jccb(Assembler::equal, L_found_empty);
5094 addptr(offset, receiver_step);
5095 cmpptr(offset, end_receiver_offset);
5096 jccb(Assembler::notEqual, L_loop_search_empty);
5097 jmpb(L_polymorphic);
5098
5099 // Slow: try to install receiver
5100 bind(L_found_empty);
5101
5102 // Atomically swing receiver slot: null -> recv.
5103 //
5104 // The update code uses CAS, which wants RAX register specifically, *and* it needs
5105 // other important registers untouched, as they form the address. Therefore, we need
5106 // to shift any important registers from RAX into some other spare register. If we
5107 // have a spare register, we are forced to save it on stack here.
5108
5109 Register spare_reg = noreg;
5110 Register shifted_mdp = mdp;
5111 Register shifted_recv = recv;
5112 if (recv == rax || mdp == rax) {
5113 spare_reg = (recv != rbx && mdp != rbx) ? rbx :
5114 (recv != rcx && mdp != rcx) ? rcx :
5115 rdx;
5116 assert_different_registers(mdp, recv, offset, spare_reg);
5117
5118 push(spare_reg);
5119 if (recv == rax) {
5120 movptr(spare_reg, recv);
5121 shifted_recv = spare_reg;
5122 } else {
5123 assert(mdp == rax, "Remaining case");
5124 movptr(spare_reg, mdp);
5125 shifted_mdp = spare_reg;
5126 }
5127 } else {
5128 push(rax);
5129 }
5130
5131 // None of the important registers are in RAX after this shuffle.
5132 assert_different_registers(rax, shifted_mdp, shifted_recv, offset);
5133
5134 xorptr(rax, rax);
5135 cmpxchgptr(shifted_recv, Address(shifted_mdp, offset, Address::times_ptr));
5136
5137 // Unshift registers.
5138 if (recv == rax || mdp == rax) {
5139 movptr(rax, spare_reg);
5140 pop(spare_reg);
5141 } else {
5142 pop(rax);
5143 }
5144
5145 // CAS success means the slot now has the receiver we want. CAS failure means
5146 // something had claimed the slot concurrently: it can be the same receiver we want,
5147 // or something else. Since this is a slow path, we can optimize for code density,
5148 // and just restart the search from the beginning.
5149 jmpb(L_restart);
5150
5151 // Counter updates:
5152
5153 // Increment polymorphic counter instead of receiver slot.
5154 bind(L_polymorphic);
5155 movptr(offset, poly_count_offset);
5156 jmpb(L_count_update);
5157
5158 // Found a receiver, convert its slot offset to corresponding count offset.
5159 bind(L_found_recv);
5160 addptr(offset, receiver_to_count_step);
5161
5162 bind(L_count_update);
5163 addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
5164 }
5165
5166 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5167 if (!VerifyOops || VerifyAdapterSharing) {
5168 // Below address of the code string confuses VerifyAdapterSharing
5169 // because it may differ between otherwise equivalent adapters.
5170 return;
5171 }
5172
5173 push(rscratch1);
5174 push(rax); // save rax,
5175 // addr may contain rsp so we will have to adjust it based on the push
5176 // we just did (and on 64 bit we do two pushes)
5177 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
5178 // stores rax into addr which is backwards of what was intended.
5179 if (addr.uses(rsp)) {
5180 lea(rax, addr);
5181 pushptr(Address(rax, 2 * BytesPerWord));
5182 } else {
5183 pushptr(addr);
5184 }
5185
5186 // Pass register number to verify_oop_subroutine
5187 const char* b = nullptr;
5188 {
5189 ResourceMark rm;
5190 stringStream ss;
5191 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
5192 b = code_string(ss.as_string());
5193 }
5194 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5195 pushptr(buffer.addr(), rscratch1);
5196
5197 // call indirectly to solve generation ordering problem
5198 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5199 call(rax);
5200 // Caller pops the arguments (addr, message) and restores rax, r10.
5201 }
5202
5203 void MacroAssembler::verify_tlab() {
5204 #ifdef ASSERT
5205 if (UseTLAB && VerifyOops) {
5206 Label next, ok;
5207 Register t1 = rsi;
5208
5209 push(t1);
5210
5211 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5212 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
5213 jcc(Assembler::aboveEqual, next);
5214 STOP("assert(top >= start)");
5215 should_not_reach_here();
5216
5217 bind(next);
5218 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
5219 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5220 jcc(Assembler::aboveEqual, ok);
5221 STOP("assert(top <= end)");
5222 should_not_reach_here();
5223
5224 bind(ok);
5225 pop(t1);
5226 }
5227 #endif
5228 }
5229
5230 class ControlWord {
5231 public:
5232 int32_t _value;
5233
5234 int rounding_control() const { return (_value >> 10) & 3 ; }
5235 int precision_control() const { return (_value >> 8) & 3 ; }
5236 bool precision() const { return ((_value >> 5) & 1) != 0; }
5237 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5238 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5239 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5240 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5241 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5242
5243 void print() const {
5244 // rounding control
5245 const char* rc;
5246 switch (rounding_control()) {
5247 case 0: rc = "round near"; break;
5248 case 1: rc = "round down"; break;
5249 case 2: rc = "round up "; break;
5250 case 3: rc = "chop "; break;
5251 default:
5252 rc = nullptr; // silence compiler warnings
5253 fatal("Unknown rounding control: %d", rounding_control());
5254 };
5255 // precision control
5256 const char* pc;
5257 switch (precision_control()) {
5258 case 0: pc = "24 bits "; break;
5259 case 1: pc = "reserved"; break;
5260 case 2: pc = "53 bits "; break;
5261 case 3: pc = "64 bits "; break;
5262 default:
5263 pc = nullptr; // silence compiler warnings
5264 fatal("Unknown precision control: %d", precision_control());
5265 };
5266 // flags
5267 char f[9];
5268 f[0] = ' ';
5269 f[1] = ' ';
5270 f[2] = (precision ()) ? 'P' : 'p';
5271 f[3] = (underflow ()) ? 'U' : 'u';
5272 f[4] = (overflow ()) ? 'O' : 'o';
5273 f[5] = (zero_divide ()) ? 'Z' : 'z';
5274 f[6] = (denormalized()) ? 'D' : 'd';
5275 f[7] = (invalid ()) ? 'I' : 'i';
5276 f[8] = '\x0';
5277 // output
5278 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5279 }
5280
5281 };
5282
5283 class StatusWord {
5284 public:
5285 int32_t _value;
5286
5287 bool busy() const { return ((_value >> 15) & 1) != 0; }
5288 bool C3() const { return ((_value >> 14) & 1) != 0; }
5289 bool C2() const { return ((_value >> 10) & 1) != 0; }
5290 bool C1() const { return ((_value >> 9) & 1) != 0; }
5291 bool C0() const { return ((_value >> 8) & 1) != 0; }
5292 int top() const { return (_value >> 11) & 7 ; }
5293 bool error_status() const { return ((_value >> 7) & 1) != 0; }
5294 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
5295 bool precision() const { return ((_value >> 5) & 1) != 0; }
5296 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5297 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5298 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5299 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5300 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5301
5302 void print() const {
5303 // condition codes
5304 char c[5];
5305 c[0] = (C3()) ? '3' : '-';
5306 c[1] = (C2()) ? '2' : '-';
5307 c[2] = (C1()) ? '1' : '-';
5308 c[3] = (C0()) ? '0' : '-';
5309 c[4] = '\x0';
5310 // flags
5311 char f[9];
5312 f[0] = (error_status()) ? 'E' : '-';
5313 f[1] = (stack_fault ()) ? 'S' : '-';
5314 f[2] = (precision ()) ? 'P' : '-';
5315 f[3] = (underflow ()) ? 'U' : '-';
5316 f[4] = (overflow ()) ? 'O' : '-';
5317 f[5] = (zero_divide ()) ? 'Z' : '-';
5318 f[6] = (denormalized()) ? 'D' : '-';
5319 f[7] = (invalid ()) ? 'I' : '-';
5320 f[8] = '\x0';
5321 // output
5322 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
5323 }
5324
5325 };
5326
5327 class TagWord {
5328 public:
5329 int32_t _value;
5330
5331 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
5332
5333 void print() const {
5334 printf("%04x", _value & 0xFFFF);
5335 }
5336
5337 };
5338
5339 class FPU_Register {
5340 public:
5341 int32_t _m0;
5342 int32_t _m1;
5343 int16_t _ex;
5344
5345 bool is_indefinite() const {
5346 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5347 }
5348
5349 void print() const {
5350 char sign = (_ex < 0) ? '-' : '+';
5351 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
5352 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
5353 };
5354
5355 };
5356
5357 class FPU_State {
5358 public:
5359 enum {
5360 register_size = 10,
5361 number_of_registers = 8,
5362 register_mask = 7
5363 };
5364
5365 ControlWord _control_word;
5366 StatusWord _status_word;
5367 TagWord _tag_word;
5368 int32_t _error_offset;
5369 int32_t _error_selector;
5370 int32_t _data_offset;
5371 int32_t _data_selector;
5372 int8_t _register[register_size * number_of_registers];
5373
5374 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5375 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
5376
5377 const char* tag_as_string(int tag) const {
5378 switch (tag) {
5379 case 0: return "valid";
5380 case 1: return "zero";
5381 case 2: return "special";
5382 case 3: return "empty";
5383 }
5384 ShouldNotReachHere();
5385 return nullptr;
5386 }
5387
5388 void print() const {
5389 // print computation registers
5390 { int t = _status_word.top();
5391 for (int i = 0; i < number_of_registers; i++) {
5392 int j = (i - t) & register_mask;
5393 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5394 st(j)->print();
5395 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5396 }
5397 }
5398 printf("\n");
5399 // print control registers
5400 printf("ctrl = "); _control_word.print(); printf("\n");
5401 printf("stat = "); _status_word .print(); printf("\n");
5402 printf("tags = "); _tag_word .print(); printf("\n");
5403 }
5404
5405 };
5406
5407 class Flag_Register {
5408 public:
5409 int32_t _value;
5410
5411 bool overflow() const { return ((_value >> 11) & 1) != 0; }
5412 bool direction() const { return ((_value >> 10) & 1) != 0; }
5413 bool sign() const { return ((_value >> 7) & 1) != 0; }
5414 bool zero() const { return ((_value >> 6) & 1) != 0; }
5415 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
5416 bool parity() const { return ((_value >> 2) & 1) != 0; }
5417 bool carry() const { return ((_value >> 0) & 1) != 0; }
5418
5419 void print() const {
5420 // flags
5421 char f[8];
5422 f[0] = (overflow ()) ? 'O' : '-';
5423 f[1] = (direction ()) ? 'D' : '-';
5424 f[2] = (sign ()) ? 'S' : '-';
5425 f[3] = (zero ()) ? 'Z' : '-';
5426 f[4] = (auxiliary_carry()) ? 'A' : '-';
5427 f[5] = (parity ()) ? 'P' : '-';
5428 f[6] = (carry ()) ? 'C' : '-';
5429 f[7] = '\x0';
5430 // output
5431 printf("%08x flags = %s", _value, f);
5432 }
5433
5434 };
5435
5436 class IU_Register {
5437 public:
5438 int32_t _value;
5439
5440 void print() const {
5441 printf("%08x %11d", _value, _value);
5442 }
5443
5444 };
5445
5446 class IU_State {
5447 public:
5448 Flag_Register _eflags;
5449 IU_Register _rdi;
5450 IU_Register _rsi;
5451 IU_Register _rbp;
5452 IU_Register _rsp;
5453 IU_Register _rbx;
5454 IU_Register _rdx;
5455 IU_Register _rcx;
5456 IU_Register _rax;
5457
5458 void print() const {
5459 // computation registers
5460 printf("rax, = "); _rax.print(); printf("\n");
5461 printf("rbx, = "); _rbx.print(); printf("\n");
5462 printf("rcx = "); _rcx.print(); printf("\n");
5463 printf("rdx = "); _rdx.print(); printf("\n");
5464 printf("rdi = "); _rdi.print(); printf("\n");
5465 printf("rsi = "); _rsi.print(); printf("\n");
5466 printf("rbp, = "); _rbp.print(); printf("\n");
5467 printf("rsp = "); _rsp.print(); printf("\n");
5468 printf("\n");
5469 // control registers
5470 printf("flgs = "); _eflags.print(); printf("\n");
5471 }
5472 };
5473
5474
5475 class CPU_State {
5476 public:
5477 FPU_State _fpu_state;
5478 IU_State _iu_state;
5479
5480 void print() const {
5481 printf("--------------------------------------------------\n");
5482 _iu_state .print();
5483 printf("\n");
5484 _fpu_state.print();
5485 printf("--------------------------------------------------\n");
5486 }
5487
5488 };
5489
5490
5491 static void _print_CPU_state(CPU_State* state) {
5492 state->print();
5493 };
5494
5495
5496 void MacroAssembler::print_CPU_state() {
5497 push_CPU_state();
5498 push(rsp); // pass CPU state
5499 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5500 addptr(rsp, wordSize); // discard argument
5501 pop_CPU_state();
5502 }
5503
5504 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
5505 // Either restore the MXCSR register after returning from the JNI Call
5506 // or verify that it wasn't changed (with -Xcheck:jni flag).
5507 if (VM_Version::supports_sse()) {
5508 if (RestoreMXCSROnJNICalls) {
5509 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
5510 } else if (CheckJNICalls) {
5511 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5512 }
5513 }
5514 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5515 vzeroupper();
5516 }
5517
5518 // ((OopHandle)result).resolve();
5519 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
5520 assert_different_registers(result, tmp);
5521
5522 // Only 64 bit platforms support GCs that require a tmp register
5523 // Only IN_HEAP loads require a thread_tmp register
5524 // OopHandle::resolve is an indirection like jobject.
5525 access_load_at(T_OBJECT, IN_NATIVE,
5526 result, Address(result, 0), tmp);
5527 }
5528
5529 // ((WeakHandle)result).resolve();
5530 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
5531 assert_different_registers(rresult, rtmp);
5532 Label resolved;
5533
5534 // A null weak handle resolves to null.
5535 cmpptr(rresult, 0);
5536 jcc(Assembler::equal, resolved);
5537
5538 // Only 64 bit platforms support GCs that require a tmp register
5539 // Only IN_HEAP loads require a thread_tmp register
5540 // WeakHandle::resolve is an indirection like jweak.
5541 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5542 rresult, Address(rresult, 0), rtmp);
5543 bind(resolved);
5544 }
5545
5546 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5547 // get mirror
5548 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5549 load_method_holder(mirror, method);
5550 movptr(mirror, Address(mirror, mirror_offset));
5551 resolve_oop_handle(mirror, tmp);
5552 }
5553
5554 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5555 load_method_holder(rresult, rmethod);
5556 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5557 }
5558
5559 void MacroAssembler::load_method_holder(Register holder, Register method) {
5560 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
5561 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5562 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5563 }
5564
5565 void MacroAssembler::load_metadata(Register dst, Register src) {
5566 if (UseCompactObjectHeaders) {
5567 load_narrow_klass_compact(dst, src);
5568 } else {
5569 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5570 }
5571 }
5572
5573 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5574 assert(UseCompactObjectHeaders, "expect compact object headers");
5575 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5576 shrq(dst, markWord::klass_shift);
5577 }
5578
5579 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5580 assert_different_registers(src, tmp);
5581 assert_different_registers(dst, tmp);
5582
5583 if (UseCompactObjectHeaders) {
5584 load_narrow_klass_compact(dst, src);
5585 decode_klass_not_null(dst, tmp);
5586 } else {
5587 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5588 decode_klass_not_null(dst, tmp);
5589 }
5590 }
5591
5592 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
5593 load_klass(dst, src, tmp);
5594 movptr(dst, Address(dst, Klass::prototype_header_offset()));
5595 }
5596
5597 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5598 assert(!UseCompactObjectHeaders, "not with compact headers");
5599 assert_different_registers(src, tmp);
5600 assert_different_registers(dst, tmp);
5601 encode_klass_not_null(src, tmp);
5602 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5603 }
5604
5605 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
5606 if (UseCompactObjectHeaders) {
5607 assert(tmp != noreg, "need tmp");
5608 assert_different_registers(klass, obj, tmp);
5609 load_narrow_klass_compact(tmp, obj);
5610 cmpl(klass, tmp);
5611 } else {
5612 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5613 }
5614 }
5615
5616 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5617 if (UseCompactObjectHeaders) {
5618 assert(tmp2 != noreg, "need tmp2");
5619 assert_different_registers(obj1, obj2, tmp1, tmp2);
5620 load_narrow_klass_compact(tmp1, obj1);
5621 load_narrow_klass_compact(tmp2, obj2);
5622 cmpl(tmp1, tmp2);
5623 } else {
5624 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5625 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
5626 }
5627 }
5628
5629 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5630 Register tmp1) {
5631 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5632 decorators = AccessInternal::decorator_fixup(decorators, type);
5633 bool as_raw = (decorators & AS_RAW) != 0;
5634 if (as_raw) {
5635 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
5636 } else {
5637 bs->load_at(this, decorators, type, dst, src, tmp1);
5638 }
5639 }
5640
5641 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5642 Register tmp1, Register tmp2, Register tmp3) {
5643 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5644 decorators = AccessInternal::decorator_fixup(decorators, type);
5645 bool as_raw = (decorators & AS_RAW) != 0;
5646 if (as_raw) {
5647 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5648 } else {
5649 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5650 }
5651 }
5652
5653 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5654 Register inline_layout_info) {
5655 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5656 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5657 }
5658
5659 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5660 movptr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
5661 movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
5662 }
5663
5664 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
5665 // ((address) (void*) o) + vk->payload_offset();
5666 Register offset = (data == oop) ? rscratch1 : data;
5667 payload_offset(inline_klass, offset);
5668 if (data == oop) {
5669 addptr(data, offset);
5670 } else {
5671 lea(data, Address(oop, offset));
5672 }
5673 }
5674
5675 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5676 Register index, Register data) {
5677 assert(index != rcx, "index needs to shift by rcx");
5678 assert_different_registers(array, array_klass, index);
5679 assert_different_registers(rcx, array, index);
5680
5681 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5682 movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
5683
5684 // Klass::layout_helper_log2_element_size(lh)
5685 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5686 shrl(rcx, Klass::_lh_log2_element_size_shift);
5687 andl(rcx, Klass::_lh_log2_element_size_mask);
5688 shlptr(index); // index << rcx
5689
5690 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
5691 }
5692
5693 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5694 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
5695 }
5696
5697 // Doesn't do verification, generates fixed size code
5698 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5699 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
5700 }
5701
5702 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5703 Register tmp2, Register tmp3, DecoratorSet decorators) {
5704 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5705 }
5706
5707 // Used for storing nulls.
5708 void MacroAssembler::store_heap_oop_null(Address dst) {
5709 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5710 }
5711
5712 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5713 assert(!UseCompactObjectHeaders, "Don't use with compact headers");
5714 // Store to klass gap in destination
5715 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5716 }
5717
5718 #ifdef ASSERT
5719 void MacroAssembler::verify_heapbase(const char* msg) {
5720 assert (UseCompressedOops, "should be compressed");
5721 assert (Universe::heap() != nullptr, "java heap should be initialized");
5722 if (CheckCompressedOops) {
5723 Label ok;
5724 ExternalAddress src2(CompressedOops::base_addr());
5725 const bool is_src2_reachable = reachable(src2);
5726 if (!is_src2_reachable) {
5727 push(rscratch1); // cmpptr trashes rscratch1
5728 }
5729 cmpptr(r12_heapbase, src2, rscratch1);
5730 jcc(Assembler::equal, ok);
5731 STOP(msg);
5732 bind(ok);
5733 if (!is_src2_reachable) {
5734 pop(rscratch1);
5735 }
5736 }
5737 }
5738 #endif
5739
5740 // Algorithm must match oop.inline.hpp encode_heap_oop.
5741 void MacroAssembler::encode_heap_oop(Register r) {
5742 #ifdef ASSERT
5743 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5744 #endif
5745 verify_oop_msg(r, "broken oop in encode_heap_oop");
5746 if (CompressedOops::base() == nullptr) {
5747 if (CompressedOops::shift() != 0) {
5748 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5749 shrq(r, LogMinObjAlignmentInBytes);
5750 }
5751 return;
5752 }
5753 testq(r, r);
5754 cmovq(Assembler::equal, r, r12_heapbase);
5755 subq(r, r12_heapbase);
5756 shrq(r, LogMinObjAlignmentInBytes);
5757 }
5758
5759 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5760 #ifdef ASSERT
5761 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5762 if (CheckCompressedOops) {
5763 Label ok;
5764 testq(r, r);
5765 jcc(Assembler::notEqual, ok);
5766 STOP("null oop passed to encode_heap_oop_not_null");
5767 bind(ok);
5768 }
5769 #endif
5770 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5771 if (CompressedOops::base() != nullptr) {
5772 subq(r, r12_heapbase);
5773 }
5774 if (CompressedOops::shift() != 0) {
5775 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5776 shrq(r, LogMinObjAlignmentInBytes);
5777 }
5778 }
5779
5780 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5781 #ifdef ASSERT
5782 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5783 if (CheckCompressedOops) {
5784 Label ok;
5785 testq(src, src);
5786 jcc(Assembler::notEqual, ok);
5787 STOP("null oop passed to encode_heap_oop_not_null2");
5788 bind(ok);
5789 }
5790 #endif
5791 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5792 if (dst != src) {
5793 movq(dst, src);
5794 }
5795 if (CompressedOops::base() != nullptr) {
5796 subq(dst, r12_heapbase);
5797 }
5798 if (CompressedOops::shift() != 0) {
5799 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5800 shrq(dst, LogMinObjAlignmentInBytes);
5801 }
5802 }
5803
5804 void MacroAssembler::decode_heap_oop(Register r) {
5805 #ifdef ASSERT
5806 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5807 #endif
5808 if (CompressedOops::base() == nullptr) {
5809 if (CompressedOops::shift() != 0) {
5810 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5811 shlq(r, LogMinObjAlignmentInBytes);
5812 }
5813 } else {
5814 Label done;
5815 shlq(r, LogMinObjAlignmentInBytes);
5816 jccb(Assembler::equal, done);
5817 addq(r, r12_heapbase);
5818 bind(done);
5819 }
5820 verify_oop_msg(r, "broken oop in decode_heap_oop");
5821 }
5822
5823 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5824 // Note: it will change flags
5825 assert (UseCompressedOops, "should only be used for compressed headers");
5826 assert (Universe::heap() != nullptr, "java heap should be initialized");
5827 // Cannot assert, unverified entry point counts instructions (see .ad file)
5828 // vtableStubs also counts instructions in pd_code_size_limit.
5829 // Also do not verify_oop as this is called by verify_oop.
5830 if (CompressedOops::shift() != 0) {
5831 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5832 shlq(r, LogMinObjAlignmentInBytes);
5833 if (CompressedOops::base() != nullptr) {
5834 addq(r, r12_heapbase);
5835 }
5836 } else {
5837 assert (CompressedOops::base() == nullptr, "sanity");
5838 }
5839 }
5840
5841 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5842 // Note: it will change flags
5843 assert (UseCompressedOops, "should only be used for compressed headers");
5844 assert (Universe::heap() != nullptr, "java heap should be initialized");
5845 // Cannot assert, unverified entry point counts instructions (see .ad file)
5846 // vtableStubs also counts instructions in pd_code_size_limit.
5847 // Also do not verify_oop as this is called by verify_oop.
5848 if (CompressedOops::shift() != 0) {
5849 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5850 if (LogMinObjAlignmentInBytes == Address::times_8) {
5851 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5852 } else {
5853 if (dst != src) {
5854 movq(dst, src);
5855 }
5856 shlq(dst, LogMinObjAlignmentInBytes);
5857 if (CompressedOops::base() != nullptr) {
5858 addq(dst, r12_heapbase);
5859 }
5860 }
5861 } else {
5862 assert (CompressedOops::base() == nullptr, "sanity");
5863 if (dst != src) {
5864 movq(dst, src);
5865 }
5866 }
5867 }
5868
5869 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5870 BLOCK_COMMENT("encode_klass_not_null {");
5871 assert_different_registers(r, tmp);
5872 if (CompressedKlassPointers::base() != nullptr) {
5873 if (AOTCodeCache::is_on_for_dump()) {
5874 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5875 } else {
5876 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5877 }
5878 subq(r, tmp);
5879 }
5880 if (CompressedKlassPointers::shift() != 0) {
5881 shrq(r, CompressedKlassPointers::shift());
5882 }
5883 BLOCK_COMMENT("} encode_klass_not_null");
5884 }
5885
5886 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5887 BLOCK_COMMENT("encode_and_move_klass_not_null {");
5888 assert_different_registers(src, dst);
5889 if (CompressedKlassPointers::base() != nullptr) {
5890 if (AOTCodeCache::is_on_for_dump()) {
5891 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5892 negq(dst);
5893 } else {
5894 movptr(dst, -(intptr_t)CompressedKlassPointers::base());
5895 }
5896 addq(dst, src);
5897 } else {
5898 movptr(dst, src);
5899 }
5900 if (CompressedKlassPointers::shift() != 0) {
5901 shrq(dst, CompressedKlassPointers::shift());
5902 }
5903 BLOCK_COMMENT("} encode_and_move_klass_not_null");
5904 }
5905
5906 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5907 BLOCK_COMMENT("decode_klass_not_null {");
5908 assert_different_registers(r, tmp);
5909 // Note: it will change flags
5910 // Cannot assert, unverified entry point counts instructions (see .ad file)
5911 // vtableStubs also counts instructions in pd_code_size_limit.
5912 // Also do not verify_oop as this is called by verify_oop.
5913 if (CompressedKlassPointers::shift() != 0) {
5914 shlq(r, CompressedKlassPointers::shift());
5915 }
5916 if (CompressedKlassPointers::base() != nullptr) {
5917 if (AOTCodeCache::is_on_for_dump()) {
5918 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5919 } else {
5920 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5921 }
5922 addq(r, tmp);
5923 }
5924 BLOCK_COMMENT("} decode_klass_not_null");
5925 }
5926
5927 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5928 BLOCK_COMMENT("decode_and_move_klass_not_null {");
5929 assert_different_registers(src, dst);
5930 // Note: it will change flags
5931 // Cannot assert, unverified entry point counts instructions (see .ad file)
5932 // vtableStubs also counts instructions in pd_code_size_limit.
5933 // Also do not verify_oop as this is called by verify_oop.
5934
5935 if (CompressedKlassPointers::base() == nullptr &&
5936 CompressedKlassPointers::shift() == 0) {
5937 // The best case scenario is that there is no base or shift. Then it is already
5938 // a pointer that needs nothing but a register rename.
5939 movl(dst, src);
5940 } else {
5941 if (CompressedKlassPointers::shift() <= Address::times_8) {
5942 if (CompressedKlassPointers::base() != nullptr) {
5943 if (AOTCodeCache::is_on_for_dump()) {
5944 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5945 } else {
5946 movptr(dst, (intptr_t)CompressedKlassPointers::base());
5947 }
5948 } else {
5949 xorq(dst, dst);
5950 }
5951 if (CompressedKlassPointers::shift() != 0) {
5952 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
5953 leaq(dst, Address(dst, src, Address::times_8, 0));
5954 } else {
5955 addq(dst, src);
5956 }
5957 } else {
5958 if (CompressedKlassPointers::base() != nullptr) {
5959 if (AOTCodeCache::is_on_for_dump()) {
5960 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5961 shrq(dst, CompressedKlassPointers::shift());
5962 } else {
5963 const intptr_t base_right_shifted =
5964 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
5965 movptr(dst, base_right_shifted);
5966 }
5967 } else {
5968 xorq(dst, dst);
5969 }
5970 addq(dst, src);
5971 shlq(dst, CompressedKlassPointers::shift());
5972 }
5973 }
5974 BLOCK_COMMENT("} decode_and_move_klass_not_null");
5975 }
5976
5977 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5978 assert (UseCompressedOops, "should only be used for compressed headers");
5979 assert (Universe::heap() != nullptr, "java heap should be initialized");
5980 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5981 int oop_index = oop_recorder()->find_index(obj);
5982 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5983 mov_narrow_oop(dst, oop_index, rspec);
5984 }
5985
5986 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5987 assert (UseCompressedOops, "should only be used for compressed headers");
5988 assert (Universe::heap() != nullptr, "java heap should be initialized");
5989 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5990 int oop_index = oop_recorder()->find_index(obj);
5991 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5992 mov_narrow_oop(dst, oop_index, rspec);
5993 }
5994
5995 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5996 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5997 int klass_index = oop_recorder()->find_index(k);
5998 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5999 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6000 }
6001
6002 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
6003 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6004 int klass_index = oop_recorder()->find_index(k);
6005 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6006 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6007 }
6008
6009 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6010 assert (UseCompressedOops, "should only be used for compressed headers");
6011 assert (Universe::heap() != nullptr, "java heap should be initialized");
6012 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6013 int oop_index = oop_recorder()->find_index(obj);
6014 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6015 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6016 }
6017
6018 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6019 assert (UseCompressedOops, "should only be used for compressed headers");
6020 assert (Universe::heap() != nullptr, "java heap should be initialized");
6021 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6022 int oop_index = oop_recorder()->find_index(obj);
6023 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6024 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6025 }
6026
6027 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6028 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6029 int klass_index = oop_recorder()->find_index(k);
6030 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6031 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6032 }
6033
6034 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6035 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6036 int klass_index = oop_recorder()->find_index(k);
6037 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6038 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6039 }
6040
6041 void MacroAssembler::reinit_heapbase() {
6042 if (UseCompressedOops) {
6043 if (Universe::heap() != nullptr && !AOTCodeCache::is_on_for_dump()) {
6044 if (CompressedOops::base() == nullptr) {
6045 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6046 } else {
6047 mov64(r12_heapbase, (int64_t)CompressedOops::base());
6048 }
6049 } else {
6050 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
6051 }
6052 }
6053 }
6054
6055 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
6056 assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
6057 // An inline type might be returned. If fields are in registers we
6058 // need to allocate an inline type instance and initialize it with
6059 // the value of the fields.
6060 Label skip;
6061 // We only need a new buffered inline type if a new one is not returned
6062 testptr(rax, 1);
6063 jcc(Assembler::zero, skip);
6064 int call_offset = -1;
6065
6066 #ifdef _LP64
6067 // The following code is similar to allocation code in TemplateTable::_new but has some slight differences,
6068 // e.g. object size is always not zero, sometimes it's constant; storing klass ptr after
6069 // allocating is not necessary if vk != nullptr, etc.
6070 Label slow_case;
6071 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6072 mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
6073 if (vk != nullptr) {
6074 // Called from C1, where the return type is statically known.
6075 movptr(rbx, (intptr_t)vk->get_InlineKlass());
6076 jint lh = vk->layout_helper();
6077 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6078 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
6079 tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
6080 } else {
6081 jmp(slow_case);
6082 }
6083 } else {
6084 // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
6085 mov(rbx, rax);
6086 andptr(rbx, -2);
6087 if (UseTLAB) {
6088 movl(r14, Address(rbx, Klass::layout_helper_offset()));
6089 testl(r14, Klass::_lh_instance_slow_path_bit);
6090 jcc(Assembler::notZero, slow_case);
6091 tlab_allocate(rax, r14, 0, r13, r14, slow_case);
6092 } else {
6093 jmp(slow_case);
6094 }
6095 }
6096 if (UseTLAB) {
6097 // 2. Initialize buffered inline instance header
6098 Register buffer_obj = rax;
6099 Register klass = rbx;
6100 if (UseCompactObjectHeaders) {
6101 Register mark_word = r13;
6102 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
6103 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
6104 } else {
6105 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
6106 xorl(r13, r13);
6107 store_klass_gap(buffer_obj, r13);
6108 if (vk == nullptr) {
6109 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
6110 mov(r13, klass);
6111 }
6112 store_klass(buffer_obj, klass, rscratch1);
6113 klass = r13;
6114 }
6115 // 3. Initialize its fields with an inline class specific handler
6116 if (vk != nullptr) {
6117 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6118 } else {
6119 movptr(rbx, Address(klass, InlineKlass::adr_members_offset()));
6120 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
6121 call(rbx);
6122 }
6123 jmp(skip);
6124 }
6125 bind(slow_case);
6126 // We failed to allocate a new inline type, fall back to a runtime
6127 // call. Some oop field may be live in some registers but we can't
6128 // tell. That runtime call will take care of preserving them
6129 // across a GC if there's one.
6130 mov(rax, rscratch1);
6131 #endif
6132
6133 if (from_interpreter) {
6134 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
6135 } else {
6136 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
6137 call_offset = offset();
6138 }
6139
6140 bind(skip);
6141 return call_offset;
6142 }
6143
6144 // Move a value between registers/stack slots and update the reg_state
6145 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
6146 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
6147 if (reg_state[to->value()] == reg_written) {
6148 return true; // Already written
6149 }
6150 if (from != to && bt != T_VOID) {
6151 if (reg_state[to->value()] == reg_readonly) {
6152 return false; // Not yet writable
6153 }
6154 if (from->is_reg()) {
6155 if (to->is_reg()) {
6156 if (from->is_XMMRegister()) {
6157 if (bt == T_DOUBLE) {
6158 movdbl(to->as_XMMRegister(), from->as_XMMRegister());
6159 } else {
6160 assert(bt == T_FLOAT, "must be float");
6161 movflt(to->as_XMMRegister(), from->as_XMMRegister());
6162 }
6163 } else {
6164 movq(to->as_Register(), from->as_Register());
6165 }
6166 } else {
6167 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6168 Address to_addr = Address(rsp, st_off);
6169 if (from->is_XMMRegister()) {
6170 if (bt == T_DOUBLE) {
6171 movdbl(to_addr, from->as_XMMRegister());
6172 } else {
6173 assert(bt == T_FLOAT, "must be float");
6174 movflt(to_addr, from->as_XMMRegister());
6175 }
6176 } else {
6177 movq(to_addr, from->as_Register());
6178 }
6179 }
6180 } else {
6181 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
6182 if (to->is_reg()) {
6183 if (to->is_XMMRegister()) {
6184 if (bt == T_DOUBLE) {
6185 movdbl(to->as_XMMRegister(), from_addr);
6186 } else {
6187 assert(bt == T_FLOAT, "must be float");
6188 movflt(to->as_XMMRegister(), from_addr);
6189 }
6190 } else {
6191 movq(to->as_Register(), from_addr);
6192 }
6193 } else {
6194 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6195 movq(r13, from_addr);
6196 movq(Address(rsp, st_off), r13);
6197 }
6198 }
6199 }
6200 // Update register states
6201 reg_state[from->value()] = reg_writable;
6202 reg_state[to->value()] = reg_written;
6203 return true;
6204 }
6205
6206 // Calculate the extra stack space required for packing or unpacking inline
6207 // args and adjust the stack pointer (see MacroAssembler::remove_frame).
6208 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
6209 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
6210 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
6211 assert(sp_inc > 0, "sanity");
6212 // Two additional slots to account for return address
6213 sp_inc += 2 * VMRegImpl::stack_slot_size;
6214
6215 push(rbp);
6216 subptr(rsp, sp_inc);
6217 #ifdef ASSERT
6218 movl(Address(rsp, 0), badRegWordVal);
6219 movl(Address(rsp, VMRegImpl::stack_slot_size), badRegWordVal);
6220 #endif
6221 return sp_inc + wordSize; // account for rbp space
6222 }
6223
6224 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
6225 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
6226 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
6227 RegState reg_state[]) {
6228 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6229 assert(from->is_valid(), "source must be valid");
6230 bool progress = false;
6231 #ifdef ASSERT
6232 const int start_offset = offset();
6233 #endif
6234
6235 Label L_null, L_notNull;
6236 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
6237 Register tmp1 = r10;
6238 Register tmp2 = r13;
6239 Register fromReg = noreg;
6240 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, true);
6241 bool done = true;
6242 bool mark_done = true;
6243 VMReg toReg;
6244 BasicType bt;
6245 // Check if argument requires a null check
6246 bool null_check = false;
6247 VMReg nullCheckReg;
6248 while (stream.next(nullCheckReg, bt)) {
6249 if (sig->at(stream.sig_index())._offset == -1) {
6250 null_check = true;
6251 break;
6252 }
6253 }
6254 stream.reset(sig_index, to_index);
6255 while (stream.next(toReg, bt)) {
6256 assert(toReg->is_valid(), "destination must be valid");
6257 int idx = (int)toReg->value();
6258 if (reg_state[idx] == reg_readonly) {
6259 if (idx != from->value()) {
6260 mark_done = false;
6261 }
6262 done = false;
6263 continue;
6264 } else if (reg_state[idx] == reg_written) {
6265 continue;
6266 }
6267 assert(reg_state[idx] == reg_writable, "must be writable");
6268 reg_state[idx] = reg_written;
6269 progress = true;
6270
6271 if (fromReg == noreg) {
6272 if (from->is_reg()) {
6273 fromReg = from->as_Register();
6274 } else {
6275 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6276 movq(tmp1, Address(rsp, st_off));
6277 fromReg = tmp1;
6278 }
6279 if (null_check) {
6280 // Nullable inline type argument, emit null check
6281 testptr(fromReg, fromReg);
6282 jcc(Assembler::zero, L_null);
6283 }
6284 }
6285 int off = sig->at(stream.sig_index())._offset;
6286 if (off == -1) {
6287 assert(null_check, "Missing null check at");
6288 if (toReg->is_stack()) {
6289 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6290 movq(Address(rsp, st_off), 1);
6291 } else {
6292 movq(toReg->as_Register(), 1);
6293 }
6294 continue;
6295 }
6296 if (sig->at(stream.sig_index())._vt_oop) {
6297 if (toReg->is_stack()) {
6298 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6299 movq(Address(rsp, st_off), fromReg);
6300 } else {
6301 movq(toReg->as_Register(), fromReg);
6302 }
6303 continue;
6304 }
6305 assert(off > 0, "offset in object should be positive");
6306 Address fromAddr = Address(fromReg, off);
6307 if (!toReg->is_XMMRegister()) {
6308 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
6309 if (is_reference_type(bt)) {
6310 load_heap_oop(dst, fromAddr);
6311 } else {
6312 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
6313 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
6314 }
6315 if (toReg->is_stack()) {
6316 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6317 movq(Address(rsp, st_off), dst);
6318 }
6319 } else if (bt == T_DOUBLE) {
6320 movdbl(toReg->as_XMMRegister(), fromAddr);
6321 } else {
6322 assert(bt == T_FLOAT, "must be float");
6323 movflt(toReg->as_XMMRegister(), fromAddr);
6324 }
6325 }
6326 if (progress && null_check) {
6327 if (done) {
6328 jmp(L_notNull);
6329 bind(L_null);
6330 // Set null marker to zero to signal that the argument is null.
6331 // Also set all fields to zero since the runtime requires a canonical
6332 // representation of a flat null.
6333 stream.reset(sig_index, to_index);
6334 while (stream.next(toReg, bt)) {
6335 if (toReg->is_stack()) {
6336 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6337 movq(Address(rsp, st_off), 0);
6338 } else if (toReg->is_XMMRegister()) {
6339 xorps(toReg->as_XMMRegister(), toReg->as_XMMRegister());
6340 } else {
6341 xorl(toReg->as_Register(), toReg->as_Register());
6342 }
6343 }
6344 bind(L_notNull);
6345 } else {
6346 bind(L_null);
6347 }
6348 }
6349
6350 sig_index = stream.sig_index();
6351 to_index = stream.regs_index();
6352
6353 if (mark_done && reg_state[from->value()] != reg_written) {
6354 // This is okay because no one else will write to that slot
6355 reg_state[from->value()] = reg_writable;
6356 }
6357 from_index--;
6358 assert(progress || (start_offset == offset()), "should not emit code");
6359 return done;
6360 }
6361
6362 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
6363 VMRegPair* from, int from_count, int& from_index, VMReg to,
6364 RegState reg_state[], Register val_array) {
6365 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
6366 assert(to->is_valid(), "destination must be valid");
6367
6368 if (reg_state[to->value()] == reg_written) {
6369 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6370 return true; // Already written
6371 }
6372
6373 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
6374 Register val_obj_tmp = r11;
6375 Register from_reg_tmp = r14;
6376 Register tmp1 = r10;
6377 Register tmp2 = r13;
6378 Register tmp3 = rbx;
6379 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
6380
6381 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
6382
6383 if (reg_state[to->value()] == reg_readonly) {
6384 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
6385 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6386 return false; // Not yet writable
6387 }
6388 val_obj = val_obj_tmp;
6389 }
6390
6391 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
6392 VMReg fromReg;
6393 BasicType bt;
6394 Label L_null;
6395 while (stream.next(fromReg, bt)) {
6396 assert(fromReg->is_valid(), "source must be valid");
6397 reg_state[fromReg->value()] = reg_writable;
6398
6399 int off = sig->at(stream.sig_index())._offset;
6400 if (off == -1) {
6401 // Nullable inline type argument, emit null check
6402 Label L_notNull;
6403 if (fromReg->is_stack()) {
6404 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6405 testb(Address(rsp, ld_off), 1);
6406 } else {
6407 testb(fromReg->as_Register(), 1);
6408 }
6409 jcc(Assembler::notZero, L_notNull);
6410 movptr(val_obj, 0);
6411 jmp(L_null);
6412 bind(L_notNull);
6413 continue;
6414 }
6415 if (sig->at(stream.sig_index())._vt_oop) {
6416 // buffer argument: use if non null
6417 if (fromReg->is_stack()) {
6418 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6419 movptr(val_obj, Address(rsp, ld_off));
6420 } else {
6421 movptr(val_obj, fromReg->as_Register());
6422 }
6423 testptr(val_obj, val_obj);
6424 jcc(Assembler::notEqual, L_null);
6425 // otherwise get the buffer from the just allocated pool of buffers
6426 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
6427 load_heap_oop(val_obj, Address(val_array, index));
6428 continue;
6429 }
6430
6431 assert(off > 0, "offset in object should be positive");
6432 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
6433
6434 // Pack the scalarized field into the value object.
6435 Address dst(val_obj, off);
6436 if (!fromReg->is_XMMRegister()) {
6437 Register src;
6438 if (fromReg->is_stack()) {
6439 src = from_reg_tmp;
6440 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6441 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
6442 } else {
6443 src = fromReg->as_Register();
6444 }
6445 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
6446 if (is_reference_type(bt)) {
6447 // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep val_obj valid.
6448 mov(tmp3, val_obj);
6449 Address dst_with_tmp3(tmp3, off);
6450 store_heap_oop(dst_with_tmp3, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
6451 } else {
6452 store_sized_value(dst, src, size_in_bytes);
6453 }
6454 } else if (bt == T_DOUBLE) {
6455 movdbl(dst, fromReg->as_XMMRegister());
6456 } else {
6457 assert(bt == T_FLOAT, "must be float");
6458 movflt(dst, fromReg->as_XMMRegister());
6459 }
6460 }
6461 bind(L_null);
6462 sig_index = stream.sig_index();
6463 from_index = stream.regs_index();
6464
6465 assert(reg_state[to->value()] == reg_writable, "must have already been read");
6466 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
6467 assert(success, "to register must be writeable");
6468 return true;
6469 }
6470
6471 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6472 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
6473 }
6474
6475 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6476 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6477 if (needs_stack_repair) {
6478 // The method has a scalarized entry point (where fields of value object arguments
6479 // are passed through registers and stack), and a non-scalarized entry point (where
6480 // value object arguments are given as oops). The non-scalarized entry point will
6481 // first load each field of value object arguments and store them in registers and on
6482 // the stack in a way compatible with the scalarized entry point. To do so, some extra
6483 // stack space might be reserved (if argument registers are not enough). On leaving the
6484 // method, this space must be freed.
6485 //
6486 // In case we used the non-scalarized entry point the stack looks like this:
6487 //
6488 // | Arguments from caller |
6489 // |---------------------------| <-- caller's SP
6490 // | Return address #1 |
6491 // | Saved RBP #1 |
6492 // |---------------------------|
6493 // | Extension space for |
6494 // | inline arg (un)packing |
6495 // |---------------------------| <-- start of this method's frame
6496 // | Return address #2 |
6497 // | Saved RBP #2 |
6498 // |---------------------------| <-- RBP (with -XX:+PreserveFramePointer)
6499 // | sp_inc |
6500 // | method locals |
6501 // |---------------------------| <-- SP
6502 //
6503 // Space for the return pc and saved rbp is reserved twice. But only the #1 copies
6504 // contain the real values of return pc and saved rbp. The #2 copies are not reliable
6505 // and should not be used. They are mostly needed to add space between the extension
6506 // space and the locals, as there would be between the real arguments and the locals
6507 // if we don't need to do unpacking (from the scalarized entry point).
6508 //
6509 // When leaving, one must load RBP #1 into RBP, and use the copy #1 of the return address,
6510 // while keeping in mind that from the scalarized entry point, there will be only one
6511 // copy. Indeed, in the case we used the scalarized calling convention, the stack looks like this:
6512 //
6513 // | Arguments from caller |
6514 // |---------------------------| <-- caller's SP
6515 // | Return address |
6516 // | Saved RBP |
6517 // |---------------------------| <-- FP (with -XX:+PreserveFramePointer)
6518 // | sp_inc |
6519 // | method locals |
6520 // |---------------------------| <-- SP
6521 //
6522 // The sp_inc stack slot holds the total size of the frame, including the extension
6523 // space and copies #2 of the return address and the saved RBP (but never the copies
6524 // #1 of the return address and saved RBP). That is how to find the copies #1 of the
6525 // return address and saved rbp. This size is expressed in bytes. Be careful when using
6526 // it from C++ in pointer arithmetic you might need to divide it by wordSize.
6527
6528 // The stack increment resides just below the saved rbp
6529 addq(rsp, Address(rsp, initial_framesize - wordSize));
6530 pop(rbp);
6531 } else {
6532 if (initial_framesize > 0) {
6533 addq(rsp, initial_framesize);
6534 }
6535 pop(rbp);
6536 }
6537 }
6538
6539 #if COMPILER2_OR_JVMCI
6540
6541 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
6542 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
6543 // cnt - number of qwords (8-byte words).
6544 // base - start address, qword aligned.
6545 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
6546 bool use64byteVector = (MaxVectorSize == 64) && (CopyAVX3Threshold == 0);
6547 if (use64byteVector) {
6548 evpbroadcastq(xtmp, val, AVX_512bit);
6549 } else if (MaxVectorSize >= 32) {
6550 movdq(xtmp, val);
6551 punpcklqdq(xtmp, xtmp);
6552 vinserti128_high(xtmp, xtmp);
6553 } else {
6554 movdq(xtmp, val);
6555 punpcklqdq(xtmp, xtmp);
6556 }
6557 jmp(L_zero_64_bytes);
6558
6559 BIND(L_loop);
6560 if (MaxVectorSize >= 32) {
6561 fill64(base, 0, xtmp, use64byteVector);
6562 } else {
6563 movdqu(Address(base, 0), xtmp);
6564 movdqu(Address(base, 16), xtmp);
6565 movdqu(Address(base, 32), xtmp);
6566 movdqu(Address(base, 48), xtmp);
6567 }
6568 addptr(base, 64);
6569
6570 BIND(L_zero_64_bytes);
6571 subptr(cnt, 8);
6572 jccb(Assembler::greaterEqual, L_loop);
6573
6574 // Copy trailing 64 bytes
6575 if (use64byteVector) {
6576 addptr(cnt, 8);
6577 jccb(Assembler::equal, L_end);
6578 fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
6579 jmp(L_end);
6580 } else {
6581 addptr(cnt, 4);
6582 jccb(Assembler::less, L_tail);
6583 if (MaxVectorSize >= 32) {
6584 vmovdqu(Address(base, 0), xtmp);
6585 } else {
6586 movdqu(Address(base, 0), xtmp);
6587 movdqu(Address(base, 16), xtmp);
6588 }
6589 }
6590 addptr(base, 32);
6591 subptr(cnt, 4);
6592
6593 BIND(L_tail);
6594 addptr(cnt, 4);
6595 jccb(Assembler::lessEqual, L_end);
6596 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
6597 fill32_masked(3, base, 0, xtmp, mask, cnt, val);
6598 } else {
6599 decrement(cnt);
6600
6601 BIND(L_sloop);
6602 movq(Address(base, 0), xtmp);
6603 addptr(base, 8);
6604 decrement(cnt);
6605 jccb(Assembler::greaterEqual, L_sloop);
6606 }
6607 BIND(L_end);
6608 }
6609
6610 // Clearing constant sized memory using YMM/ZMM registers.
6611 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
6612 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
6613 bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
6614
6615 int vector64_count = (cnt & (~0x7)) >> 3;
6616 cnt = cnt & 0x7;
6617 const int fill64_per_loop = 4;
6618 const int max_unrolled_fill64 = 8;
6619
6620 // 64 byte initialization loop.
6621 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
6622 int start64 = 0;
6623 if (vector64_count > max_unrolled_fill64) {
6624 Label LOOP;
6625 Register index = rtmp;
6626
6627 start64 = vector64_count - (vector64_count % fill64_per_loop);
6628
6629 movl(index, 0);
6630 BIND(LOOP);
6631 for (int i = 0; i < fill64_per_loop; i++) {
6632 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
6633 }
6634 addl(index, fill64_per_loop * 64);
6635 cmpl(index, start64 * 64);
6636 jccb(Assembler::less, LOOP);
6637 }
6638 for (int i = start64; i < vector64_count; i++) {
6639 fill64(base, i * 64, xtmp, use64byteVector);
6640 }
6641
6642 // Clear remaining 64 byte tail.
6643 int disp = vector64_count * 64;
6644 if (cnt) {
6645 switch (cnt) {
6646 case 1:
6647 movq(Address(base, disp), xtmp);
6648 break;
6649 case 2:
6650 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
6651 break;
6652 case 3:
6653 movl(rtmp, 0x7);
6654 kmovwl(mask, rtmp);
6655 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
6656 break;
6657 case 4:
6658 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6659 break;
6660 case 5:
6661 if (use64byteVector) {
6662 movl(rtmp, 0x1F);
6663 kmovwl(mask, rtmp);
6664 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6665 } else {
6666 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6667 movq(Address(base, disp + 32), xtmp);
6668 }
6669 break;
6670 case 6:
6671 if (use64byteVector) {
6672 movl(rtmp, 0x3F);
6673 kmovwl(mask, rtmp);
6674 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6675 } else {
6676 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6677 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
6678 }
6679 break;
6680 case 7:
6681 if (use64byteVector) {
6682 movl(rtmp, 0x7F);
6683 kmovwl(mask, rtmp);
6684 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6685 } else {
6686 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6687 movl(rtmp, 0x7);
6688 kmovwl(mask, rtmp);
6689 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
6690 }
6691 break;
6692 default:
6693 fatal("Unexpected length : %d\n",cnt);
6694 break;
6695 }
6696 }
6697 }
6698
6699 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
6700 bool is_large, bool word_copy_only, KRegister mask) {
6701 // cnt - number of qwords (8-byte words).
6702 // base - start address, qword aligned.
6703 // is_large - if optimizers know cnt is larger than InitArrayShortSize
6704 assert(base==rdi, "base register must be edi for rep stos");
6705 assert(val==rax, "val register must be eax for rep stos");
6706 assert(cnt==rcx, "cnt register must be ecx for rep stos");
6707 assert(InitArrayShortSize % BytesPerLong == 0,
6708 "InitArrayShortSize should be the multiple of BytesPerLong");
6709
6710 Label DONE;
6711
6712 if (!is_large) {
6713 Label LOOP, LONG;
6714 cmpptr(cnt, InitArrayShortSize/BytesPerLong);
6715 jccb(Assembler::greater, LONG);
6716
6717 decrement(cnt);
6718 jccb(Assembler::negative, DONE); // Zero length
6719
6720 // Use individual pointer-sized stores for small counts:
6721 BIND(LOOP);
6722 movptr(Address(base, cnt, Address::times_ptr), val);
6723 decrement(cnt);
6724 jccb(Assembler::greaterEqual, LOOP);
6725 jmpb(DONE);
6726
6727 BIND(LONG);
6728 }
6729
6730 // Use longer rep-prefixed ops for non-small counts:
6731 if (UseFastStosb && !word_copy_only) {
6732 shlptr(cnt, 3); // convert to number of bytes
6733 rep_stosb();
6734 } else if (UseXMMForObjInit) {
6735 xmm_clear_mem(base, cnt, val, xtmp, mask);
6736 } else {
6737 rep_stos();
6738 }
6739
6740 BIND(DONE);
6741 }
6742
6743 #endif //COMPILER2_OR_JVMCI
6744
6745
6746 void MacroAssembler::generate_fill(BasicType t, bool aligned,
6747 Register to, Register value, Register count,
6748 Register rtmp, XMMRegister xtmp) {
6749 ShortBranchVerifier sbv(this);
6750 assert_different_registers(to, value, count, rtmp);
6751 Label L_exit;
6752 Label L_fill_2_bytes, L_fill_4_bytes;
6753
6754 #if defined(COMPILER2)
6755 if(MaxVectorSize >=32 &&
6756 VM_Version::supports_avx512vlbw() &&
6757 VM_Version::supports_bmi2()) {
6758 generate_fill_avx3(t, to, value, count, rtmp, xtmp);
6759 return;
6760 }
6761 #endif
6762
6763 int shift = -1;
6764 switch (t) {
6765 case T_BYTE:
6766 shift = 2;
6767 break;
6768 case T_SHORT:
6769 shift = 1;
6770 break;
6771 case T_INT:
6772 shift = 0;
6773 break;
6774 default: ShouldNotReachHere();
6775 }
6776
6777 if (t == T_BYTE) {
6778 andl(value, 0xff);
6779 movl(rtmp, value);
6780 shll(rtmp, 8);
6781 orl(value, rtmp);
6782 }
6783 if (t == T_SHORT) {
6784 andl(value, 0xffff);
6785 }
6786 if (t == T_BYTE || t == T_SHORT) {
6787 movl(rtmp, value);
6788 shll(rtmp, 16);
6789 orl(value, rtmp);
6790 }
6791
6792 cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
6793 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
6794 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
6795 Label L_skip_align2;
6796 // align source address at 4 bytes address boundary
6797 if (t == T_BYTE) {
6798 Label L_skip_align1;
6799 // One byte misalignment happens only for byte arrays
6800 testptr(to, 1);
6801 jccb(Assembler::zero, L_skip_align1);
6802 movb(Address(to, 0), value);
6803 increment(to);
6804 decrement(count);
6805 BIND(L_skip_align1);
6806 }
6807 // Two bytes misalignment happens only for byte and short (char) arrays
6808 testptr(to, 2);
6809 jccb(Assembler::zero, L_skip_align2);
6810 movw(Address(to, 0), value);
6811 addptr(to, 2);
6812 subptr(count, 1<<(shift-1));
6813 BIND(L_skip_align2);
6814 }
6815 {
6816 Label L_fill_32_bytes;
6817 if (!UseUnalignedLoadStores) {
6818 // align to 8 bytes, we know we are 4 byte aligned to start
6819 testptr(to, 4);
6820 jccb(Assembler::zero, L_fill_32_bytes);
6821 movl(Address(to, 0), value);
6822 addptr(to, 4);
6823 subptr(count, 1<<shift);
6824 }
6825 BIND(L_fill_32_bytes);
6826 {
6827 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
6828 movdl(xtmp, value);
6829 if (UseAVX >= 2 && UseUnalignedLoadStores) {
6830 Label L_check_fill_32_bytes;
6831 if (UseAVX > 2) {
6832 // Fill 64-byte chunks
6833 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
6834
6835 // If number of bytes to fill < CopyAVX3Threshold, perform fill using AVX2
6836 cmpptr(count, CopyAVX3Threshold);
6837 jccb(Assembler::below, L_check_fill_64_bytes_avx2);
6838
6839 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
6840
6841 subptr(count, 16 << shift);
6842 jcc(Assembler::less, L_check_fill_32_bytes);
6843 align(16);
6844
6845 BIND(L_fill_64_bytes_loop_avx3);
6846 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
6847 addptr(to, 64);
6848 subptr(count, 16 << shift);
6849 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
6850 jmpb(L_check_fill_32_bytes);
6851
6852 BIND(L_check_fill_64_bytes_avx2);
6853 }
6854 // Fill 64-byte chunks
6855 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
6856
6857 subptr(count, 16 << shift);
6858 jcc(Assembler::less, L_check_fill_32_bytes);
6859
6860 // align data for 64-byte chunks
6861 Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
6862 if (EnableX86ECoreOpts) {
6863 // align 'big' arrays to cache lines to minimize split_stores
6864 cmpptr(count, 96 << shift);
6865 jcc(Assembler::below, L_fill_64_bytes_loop);
6866
6867 // Find the bytes needed for alignment
6868 movptr(rtmp, to);
6869 andptr(rtmp, 0x1c);
6870 jcc(Assembler::zero, L_fill_64_bytes_loop);
6871 negptr(rtmp); // number of bytes to fill 32-rtmp. it filled by 2 mov by 32
6872 addptr(rtmp, 32);
6873 shrptr(rtmp, 2 - shift);// get number of elements from bytes
6874 subptr(count, rtmp); // adjust count by number of elements
6875
6876 align(16);
6877 BIND(L_align_64_bytes_loop);
6878 movdl(Address(to, 0), xtmp);
6879 addptr(to, 4);
6880 subptr(rtmp, 1 << shift);
6881 jcc(Assembler::greater, L_align_64_bytes_loop);
6882 }
6883
6884 align(16);
6885 BIND(L_fill_64_bytes_loop);
6886 vmovdqu(Address(to, 0), xtmp);
6887 vmovdqu(Address(to, 32), xtmp);
6888 addptr(to, 64);
6889 subptr(count, 16 << shift);
6890 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
6891
6892 align(16);
6893 BIND(L_check_fill_32_bytes);
6894 addptr(count, 8 << shift);
6895 jccb(Assembler::less, L_check_fill_8_bytes);
6896 vmovdqu(Address(to, 0), xtmp);
6897 addptr(to, 32);
6898 subptr(count, 8 << shift);
6899
6900 BIND(L_check_fill_8_bytes);
6901 // clean upper bits of YMM registers
6902 movdl(xtmp, value);
6903 pshufd(xtmp, xtmp, 0);
6904 } else {
6905 // Fill 32-byte chunks
6906 pshufd(xtmp, xtmp, 0);
6907
6908 subptr(count, 8 << shift);
6909 jcc(Assembler::less, L_check_fill_8_bytes);
6910 align(16);
6911
6912 BIND(L_fill_32_bytes_loop);
6913
6914 if (UseUnalignedLoadStores) {
6915 movdqu(Address(to, 0), xtmp);
6916 movdqu(Address(to, 16), xtmp);
6917 } else {
6918 movq(Address(to, 0), xtmp);
6919 movq(Address(to, 8), xtmp);
6920 movq(Address(to, 16), xtmp);
6921 movq(Address(to, 24), xtmp);
6922 }
6923
6924 addptr(to, 32);
6925 subptr(count, 8 << shift);
6926 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
6927
6928 BIND(L_check_fill_8_bytes);
6929 }
6930 addptr(count, 8 << shift);
6931 jccb(Assembler::zero, L_exit);
6932 jmpb(L_fill_8_bytes);
6933
6934 //
6935 // length is too short, just fill qwords
6936 //
6937 align(16);
6938 BIND(L_fill_8_bytes_loop);
6939 movq(Address(to, 0), xtmp);
6940 addptr(to, 8);
6941 BIND(L_fill_8_bytes);
6942 subptr(count, 1 << (shift + 1));
6943 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
6944 }
6945 }
6946
6947 Label L_fill_4_bytes_loop;
6948 testl(count, 1 << shift);
6949 jccb(Assembler::zero, L_fill_2_bytes);
6950
6951 align(16);
6952 BIND(L_fill_4_bytes_loop);
6953 movl(Address(to, 0), value);
6954 addptr(to, 4);
6955
6956 BIND(L_fill_4_bytes);
6957 subptr(count, 1 << shift);
6958 jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
6959
6960 if (t == T_BYTE || t == T_SHORT) {
6961 Label L_fill_byte;
6962 BIND(L_fill_2_bytes);
6963 // fill trailing 2 bytes
6964 testl(count, 1<<(shift-1));
6965 jccb(Assembler::zero, L_fill_byte);
6966 movw(Address(to, 0), value);
6967 if (t == T_BYTE) {
6968 addptr(to, 2);
6969 BIND(L_fill_byte);
6970 // fill trailing byte
6971 testl(count, 1);
6972 jccb(Assembler::zero, L_exit);
6973 movb(Address(to, 0), value);
6974 } else {
6975 BIND(L_fill_byte);
6976 }
6977 } else {
6978 BIND(L_fill_2_bytes);
6979 }
6980 BIND(L_exit);
6981 }
6982
6983 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
6984 switch(type) {
6985 case T_BYTE:
6986 case T_BOOLEAN:
6987 evpbroadcastb(dst, src, vector_len);
6988 break;
6989 case T_SHORT:
6990 case T_CHAR:
6991 evpbroadcastw(dst, src, vector_len);
6992 break;
6993 case T_INT:
6994 case T_FLOAT:
6995 evpbroadcastd(dst, src, vector_len);
6996 break;
6997 case T_LONG:
6998 case T_DOUBLE:
6999 evpbroadcastq(dst, src, vector_len);
7000 break;
7001 default:
7002 fatal("Unhandled type : %s", type2name(type));
7003 break;
7004 }
7005 }
7006
7007 // Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
7008 //
7009 // @IntrinsicCandidate
7010 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
7011 // char[] sa, int sp, byte[] da, int dp, int len) {
7012 // int i = 0;
7013 // for (; i < len; i++) {
7014 // char c = sa[sp++];
7015 // if (c > '\u00FF')
7016 // break;
7017 // da[dp++] = (byte) c;
7018 // }
7019 // return i;
7020 // }
7021 //
7022 // @IntrinsicCandidate
7023 // int java.lang.StringCoding.encodeISOArray0(
7024 // byte[] sa, int sp, byte[] da, int dp, int len) {
7025 // int i = 0;
7026 // for (; i < len; i++) {
7027 // char c = StringUTF16.getChar(sa, sp++);
7028 // if (c > '\u00FF')
7029 // break;
7030 // da[dp++] = (byte) c;
7031 // }
7032 // return i;
7033 // }
7034 //
7035 // @IntrinsicCandidate
7036 // int java.lang.StringCoding.encodeAsciiArray0(
7037 // char[] sa, int sp, byte[] da, int dp, int len) {
7038 // int i = 0;
7039 // for (; i < len; i++) {
7040 // char c = sa[sp++];
7041 // if (c >= '\u0080')
7042 // break;
7043 // da[dp++] = (byte) c;
7044 // }
7045 // return i;
7046 // }
7047 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7048 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7049 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7050 Register tmp5, Register result, bool ascii) {
7051
7052 // rsi: src
7053 // rdi: dst
7054 // rdx: len
7055 // rcx: tmp5
7056 // rax: result
7057 ShortBranchVerifier sbv(this);
7058 assert_different_registers(src, dst, len, tmp5, result);
7059 Label L_done, L_copy_1_char, L_copy_1_char_exit;
7060
7061 int mask = ascii ? 0xff80ff80 : 0xff00ff00;
7062 int short_mask = ascii ? 0xff80 : 0xff00;
7063
7064 // set result
7065 xorl(result, result);
7066 // check for zero length
7067 testl(len, len);
7068 jcc(Assembler::zero, L_done);
7069
7070 movl(result, len);
7071
7072 // Setup pointers
7073 lea(src, Address(src, len, Address::times_2)); // char[]
7074 lea(dst, Address(dst, len, Address::times_1)); // byte[]
7075 negptr(len);
7076
7077 if (UseSSE42Intrinsics || UseAVX >= 2) {
7078 Label L_copy_8_chars, L_copy_8_chars_exit;
7079 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7080
7081 if (UseAVX >= 2) {
7082 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7083 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
7084 movdl(tmp1Reg, tmp5);
7085 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
7086 jmp(L_chars_32_check);
7087
7088 bind(L_copy_32_chars);
7089 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7090 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7091 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7092 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
7093 jccb(Assembler::notZero, L_copy_32_chars_exit);
7094 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7095 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
7096 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7097
7098 bind(L_chars_32_check);
7099 addptr(len, 32);
7100 jcc(Assembler::lessEqual, L_copy_32_chars);
7101
7102 bind(L_copy_32_chars_exit);
7103 subptr(len, 16);
7104 jccb(Assembler::greater, L_copy_16_chars_exit);
7105
7106 } else if (UseSSE42Intrinsics) {
7107 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
7108 movdl(tmp1Reg, tmp5);
7109 pshufd(tmp1Reg, tmp1Reg, 0);
7110 jmpb(L_chars_16_check);
7111 }
7112
7113 bind(L_copy_16_chars);
7114 if (UseAVX >= 2) {
7115 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7116 vptest(tmp2Reg, tmp1Reg);
7117 jcc(Assembler::notZero, L_copy_16_chars_exit);
7118 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
7119 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
7120 } else {
7121 if (UseAVX > 0) {
7122 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7123 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7124 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
7125 } else {
7126 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7127 por(tmp2Reg, tmp3Reg);
7128 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7129 por(tmp2Reg, tmp4Reg);
7130 }
7131 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
7132 jccb(Assembler::notZero, L_copy_16_chars_exit);
7133 packuswb(tmp3Reg, tmp4Reg);
7134 }
7135 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7136
7137 bind(L_chars_16_check);
7138 addptr(len, 16);
7139 jcc(Assembler::lessEqual, L_copy_16_chars);
7140
7141 bind(L_copy_16_chars_exit);
7142 if (UseAVX >= 2) {
7143 // clean upper bits of YMM registers
7144 vpxor(tmp2Reg, tmp2Reg);
7145 vpxor(tmp3Reg, tmp3Reg);
7146 vpxor(tmp4Reg, tmp4Reg);
7147 movdl(tmp1Reg, tmp5);
7148 pshufd(tmp1Reg, tmp1Reg, 0);
7149 }
7150 subptr(len, 8);
7151 jccb(Assembler::greater, L_copy_8_chars_exit);
7152
7153 bind(L_copy_8_chars);
7154 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7155 ptest(tmp3Reg, tmp1Reg);
7156 jccb(Assembler::notZero, L_copy_8_chars_exit);
7157 packuswb(tmp3Reg, tmp1Reg);
7158 movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7159 addptr(len, 8);
7160 jccb(Assembler::lessEqual, L_copy_8_chars);
7161
7162 bind(L_copy_8_chars_exit);
7163 subptr(len, 8);
7164 jccb(Assembler::zero, L_done);
7165 }
7166
7167 bind(L_copy_1_char);
7168 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7169 testl(tmp5, short_mask); // check if Unicode or non-ASCII char
7170 jccb(Assembler::notZero, L_copy_1_char_exit);
7171 movb(Address(dst, len, Address::times_1, 0), tmp5);
7172 addptr(len, 1);
7173 jccb(Assembler::less, L_copy_1_char);
7174
7175 bind(L_copy_1_char_exit);
7176 addptr(result, len); // len is negative count of not processed elements
7177
7178 bind(L_done);
7179 }
7180
7181 /**
7182 * Helper for multiply_to_len().
7183 */
7184 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7185 addq(dest_lo, src1);
7186 adcq(dest_hi, 0);
7187 addq(dest_lo, src2);
7188 adcq(dest_hi, 0);
7189 }
7190
7191 /**
7192 * Multiply 64 bit by 64 bit first loop.
7193 */
7194 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7195 Register y, Register y_idx, Register z,
7196 Register carry, Register product,
7197 Register idx, Register kdx) {
7198 //
7199 // jlong carry, x[], y[], z[];
7200 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) {
7201 // huge_128 product = y[idx] * x[xstart] + carry;
7202 // z[kdx] = (jlong)product;
7203 // carry = (jlong)(product >>> 64);
7204 // }
7205 // z[xstart] = carry;
7206 //
7207
7208 Label L_first_loop, L_first_loop_exit;
7209 Label L_one_x, L_one_y, L_multiply;
7210
7211 decrementl(xstart);
7212 jcc(Assembler::negative, L_one_x);
7213
7214 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7215 rorq(x_xstart, 32); // convert big-endian to little-endian
7216
7217 bind(L_first_loop);
7218 decrementl(idx);
7219 jcc(Assembler::negative, L_first_loop_exit);
7220 decrementl(idx);
7221 jcc(Assembler::negative, L_one_y);
7222 movq(y_idx, Address(y, idx, Address::times_4, 0));
7223 rorq(y_idx, 32); // convert big-endian to little-endian
7224 bind(L_multiply);
7225 movq(product, x_xstart);
7226 mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7227 addq(product, carry);
7228 adcq(rdx, 0);
7229 subl(kdx, 2);
7230 movl(Address(z, kdx, Address::times_4, 4), product);
7231 shrq(product, 32);
7232 movl(Address(z, kdx, Address::times_4, 0), product);
7233 movq(carry, rdx);
7234 jmp(L_first_loop);
7235
7236 bind(L_one_y);
7237 movl(y_idx, Address(y, 0));
7238 jmp(L_multiply);
7239
7240 bind(L_one_x);
7241 movl(x_xstart, Address(x, 0));
7242 jmp(L_first_loop);
7243
7244 bind(L_first_loop_exit);
7245 }
7246
7247 /**
7248 * Multiply 64 bit by 64 bit and add 128 bit.
7249 */
7250 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7251 Register yz_idx, Register idx,
7252 Register carry, Register product, int offset) {
7253 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7254 // z[kdx] = (jlong)product;
7255
7256 movq(yz_idx, Address(y, idx, Address::times_4, offset));
7257 rorq(yz_idx, 32); // convert big-endian to little-endian
7258 movq(product, x_xstart);
7259 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7260 movq(yz_idx, Address(z, idx, Address::times_4, offset));
7261 rorq(yz_idx, 32); // convert big-endian to little-endian
7262
7263 add2_with_carry(rdx, product, carry, yz_idx);
7264
7265 movl(Address(z, idx, Address::times_4, offset+4), product);
7266 shrq(product, 32);
7267 movl(Address(z, idx, Address::times_4, offset), product);
7268
7269 }
7270
7271 /**
7272 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7273 */
7274 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7275 Register yz_idx, Register idx, Register jdx,
7276 Register carry, Register product,
7277 Register carry2) {
7278 // jlong carry, x[], y[], z[];
7279 // int kdx = ystart+1;
7280 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7281 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7282 // z[kdx+idx+1] = (jlong)product;
7283 // jlong carry2 = (jlong)(product >>> 64);
7284 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7285 // z[kdx+idx] = (jlong)product;
7286 // carry = (jlong)(product >>> 64);
7287 // }
7288 // idx += 2;
7289 // if (idx > 0) {
7290 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7291 // z[kdx+idx] = (jlong)product;
7292 // carry = (jlong)(product >>> 64);
7293 // }
7294 //
7295
7296 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7297
7298 movl(jdx, idx);
7299 andl(jdx, 0xFFFFFFFC);
7300 shrl(jdx, 2);
7301
7302 bind(L_third_loop);
7303 subl(jdx, 1);
7304 jcc(Assembler::negative, L_third_loop_exit);
7305 subl(idx, 4);
7306
7307 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7308 movq(carry2, rdx);
7309
7310 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7311 movq(carry, rdx);
7312 jmp(L_third_loop);
7313
7314 bind (L_third_loop_exit);
7315
7316 andl (idx, 0x3);
7317 jcc(Assembler::zero, L_post_third_loop_done);
7318
7319 Label L_check_1;
7320 subl(idx, 2);
7321 jcc(Assembler::negative, L_check_1);
7322
7323 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7324 movq(carry, rdx);
7325
7326 bind (L_check_1);
7327 addl (idx, 0x2);
7328 andl (idx, 0x1);
7329 subl(idx, 1);
7330 jcc(Assembler::negative, L_post_third_loop_done);
7331
7332 movl(yz_idx, Address(y, idx, Address::times_4, 0));
7333 movq(product, x_xstart);
7334 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7335 movl(yz_idx, Address(z, idx, Address::times_4, 0));
7336
7337 add2_with_carry(rdx, product, yz_idx, carry);
7338
7339 movl(Address(z, idx, Address::times_4, 0), product);
7340 shrq(product, 32);
7341
7342 shlq(rdx, 32);
7343 orq(product, rdx);
7344 movq(carry, product);
7345
7346 bind(L_post_third_loop_done);
7347 }
7348
7349 /**
7350 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7351 *
7352 */
7353 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7354 Register carry, Register carry2,
7355 Register idx, Register jdx,
7356 Register yz_idx1, Register yz_idx2,
7357 Register tmp, Register tmp3, Register tmp4) {
7358 assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7359
7360 // jlong carry, x[], y[], z[];
7361 // int kdx = ystart+1;
7362 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7363 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7364 // jlong carry2 = (jlong)(tmp3 >>> 64);
7365 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
7366 // carry = (jlong)(tmp4 >>> 64);
7367 // z[kdx+idx+1] = (jlong)tmp3;
7368 // z[kdx+idx] = (jlong)tmp4;
7369 // }
7370 // idx += 2;
7371 // if (idx > 0) {
7372 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7373 // z[kdx+idx] = (jlong)yz_idx1;
7374 // carry = (jlong)(yz_idx1 >>> 64);
7375 // }
7376 //
7377
7378 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7379
7380 movl(jdx, idx);
7381 andl(jdx, 0xFFFFFFFC);
7382 shrl(jdx, 2);
7383
7384 bind(L_third_loop);
7385 subl(jdx, 1);
7386 jcc(Assembler::negative, L_third_loop_exit);
7387 subl(idx, 4);
7388
7389 movq(yz_idx1, Address(y, idx, Address::times_4, 8));
7390 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7391 movq(yz_idx2, Address(y, idx, Address::times_4, 0));
7392 rorxq(yz_idx2, yz_idx2, 32);
7393
7394 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7395 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp
7396
7397 movq(yz_idx1, Address(z, idx, Address::times_4, 8));
7398 rorxq(yz_idx1, yz_idx1, 32);
7399 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7400 rorxq(yz_idx2, yz_idx2, 32);
7401
7402 if (VM_Version::supports_adx()) {
7403 adcxq(tmp3, carry);
7404 adoxq(tmp3, yz_idx1);
7405
7406 adcxq(tmp4, tmp);
7407 adoxq(tmp4, yz_idx2);
7408
7409 movl(carry, 0); // does not affect flags
7410 adcxq(carry2, carry);
7411 adoxq(carry2, carry);
7412 } else {
7413 add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7414 add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7415 }
7416 movq(carry, carry2);
7417
7418 movl(Address(z, idx, Address::times_4, 12), tmp3);
7419 shrq(tmp3, 32);
7420 movl(Address(z, idx, Address::times_4, 8), tmp3);
7421
7422 movl(Address(z, idx, Address::times_4, 4), tmp4);
7423 shrq(tmp4, 32);
7424 movl(Address(z, idx, Address::times_4, 0), tmp4);
7425
7426 jmp(L_third_loop);
7427
7428 bind (L_third_loop_exit);
7429
7430 andl (idx, 0x3);
7431 jcc(Assembler::zero, L_post_third_loop_done);
7432
7433 Label L_check_1;
7434 subl(idx, 2);
7435 jcc(Assembler::negative, L_check_1);
7436
7437 movq(yz_idx1, Address(y, idx, Address::times_4, 0));
7438 rorxq(yz_idx1, yz_idx1, 32);
7439 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7440 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7441 rorxq(yz_idx2, yz_idx2, 32);
7442
7443 add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7444
7445 movl(Address(z, idx, Address::times_4, 4), tmp3);
7446 shrq(tmp3, 32);
7447 movl(Address(z, idx, Address::times_4, 0), tmp3);
7448 movq(carry, tmp4);
7449
7450 bind (L_check_1);
7451 addl (idx, 0x2);
7452 andl (idx, 0x1);
7453 subl(idx, 1);
7454 jcc(Assembler::negative, L_post_third_loop_done);
7455 movl(tmp4, Address(y, idx, Address::times_4, 0));
7456 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3
7457 movl(tmp4, Address(z, idx, Address::times_4, 0));
7458
7459 add2_with_carry(carry2, tmp3, tmp4, carry);
7460
7461 movl(Address(z, idx, Address::times_4, 0), tmp3);
7462 shrq(tmp3, 32);
7463
7464 shlq(carry2, 32);
7465 orq(tmp3, carry2);
7466 movq(carry, tmp3);
7467
7468 bind(L_post_third_loop_done);
7469 }
7470
7471 /**
7472 * Code for BigInteger::multiplyToLen() intrinsic.
7473 *
7474 * rdi: x
7475 * rax: xlen
7476 * rsi: y
7477 * rcx: ylen
7478 * r8: z
7479 * r11: tmp0
7480 * r12: tmp1
7481 * r13: tmp2
7482 * r14: tmp3
7483 * r15: tmp4
7484 * rbx: tmp5
7485 *
7486 */
7487 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
7488 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7489 ShortBranchVerifier sbv(this);
7490 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7491
7492 push(tmp0);
7493 push(tmp1);
7494 push(tmp2);
7495 push(tmp3);
7496 push(tmp4);
7497 push(tmp5);
7498
7499 push(xlen);
7500
7501 const Register idx = tmp1;
7502 const Register kdx = tmp2;
7503 const Register xstart = tmp3;
7504
7505 const Register y_idx = tmp4;
7506 const Register carry = tmp5;
7507 const Register product = xlen;
7508 const Register x_xstart = tmp0;
7509
7510 // First Loop.
7511 //
7512 // final static long LONG_MASK = 0xffffffffL;
7513 // int xstart = xlen - 1;
7514 // int ystart = ylen - 1;
7515 // long carry = 0;
7516 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) {
7517 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7518 // z[kdx] = (int)product;
7519 // carry = product >>> 32;
7520 // }
7521 // z[xstart] = (int)carry;
7522 //
7523
7524 movl(idx, ylen); // idx = ylen;
7525 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
7526 xorq(carry, carry); // carry = 0;
7527
7528 Label L_done;
7529
7530 movl(xstart, xlen);
7531 decrementl(xstart);
7532 jcc(Assembler::negative, L_done);
7533
7534 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7535
7536 Label L_second_loop;
7537 testl(kdx, kdx);
7538 jcc(Assembler::zero, L_second_loop);
7539
7540 Label L_carry;
7541 subl(kdx, 1);
7542 jcc(Assembler::zero, L_carry);
7543
7544 movl(Address(z, kdx, Address::times_4, 0), carry);
7545 shrq(carry, 32);
7546 subl(kdx, 1);
7547
7548 bind(L_carry);
7549 movl(Address(z, kdx, Address::times_4, 0), carry);
7550
7551 // Second and third (nested) loops.
7552 //
7553 // for (int i = xstart-1; i >= 0; i--) { // Second loop
7554 // carry = 0;
7555 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7556 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7557 // (z[k] & LONG_MASK) + carry;
7558 // z[k] = (int)product;
7559 // carry = product >>> 32;
7560 // }
7561 // z[i] = (int)carry;
7562 // }
7563 //
7564 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7565
7566 const Register jdx = tmp1;
7567
7568 bind(L_second_loop);
7569 xorl(carry, carry); // carry = 0;
7570 movl(jdx, ylen); // j = ystart+1
7571
7572 subl(xstart, 1); // i = xstart-1;
7573 jcc(Assembler::negative, L_done);
7574
7575 push (z);
7576
7577 Label L_last_x;
7578 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7579 subl(xstart, 1); // i = xstart-1;
7580 jcc(Assembler::negative, L_last_x);
7581
7582 if (UseBMI2Instructions) {
7583 movq(rdx, Address(x, xstart, Address::times_4, 0));
7584 rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7585 } else {
7586 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7587 rorq(x_xstart, 32); // convert big-endian to little-endian
7588 }
7589
7590 Label L_third_loop_prologue;
7591 bind(L_third_loop_prologue);
7592
7593 push (x);
7594 push (xstart);
7595 push (ylen);
7596
7597
7598 if (UseBMI2Instructions) {
7599 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7600 } else { // !UseBMI2Instructions
7601 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7602 }
7603
7604 pop(ylen);
7605 pop(xlen);
7606 pop(x);
7607 pop(z);
7608
7609 movl(tmp3, xlen);
7610 addl(tmp3, 1);
7611 movl(Address(z, tmp3, Address::times_4, 0), carry);
7612 subl(tmp3, 1);
7613 jccb(Assembler::negative, L_done);
7614
7615 shrq(carry, 32);
7616 movl(Address(z, tmp3, Address::times_4, 0), carry);
7617 jmp(L_second_loop);
7618
7619 // Next infrequent code is moved outside loops.
7620 bind(L_last_x);
7621 if (UseBMI2Instructions) {
7622 movl(rdx, Address(x, 0));
7623 } else {
7624 movl(x_xstart, Address(x, 0));
7625 }
7626 jmp(L_third_loop_prologue);
7627
7628 bind(L_done);
7629
7630 pop(xlen);
7631
7632 pop(tmp5);
7633 pop(tmp4);
7634 pop(tmp3);
7635 pop(tmp2);
7636 pop(tmp1);
7637 pop(tmp0);
7638 }
7639
7640 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
7641 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
7642 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
7643 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
7644 Label VECTOR8_TAIL, VECTOR4_TAIL;
7645 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
7646 Label SAME_TILL_END, DONE;
7647 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
7648
7649 //scale is in rcx in both Win64 and Unix
7650 ShortBranchVerifier sbv(this);
7651
7652 shlq(length);
7653 xorq(result, result);
7654
7655 if ((AVX3Threshold == 0) && (UseAVX > 2) &&
7656 VM_Version::supports_avx512vlbw() && UseCountTrailingZerosInstruction) {
7657 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
7658
7659 cmpq(length, 64);
7660 jcc(Assembler::less, VECTOR32_TAIL);
7661
7662 movq(tmp1, length);
7663 andq(tmp1, 0x3F); // tail count
7664 andq(length, ~(0x3F)); //vector count
7665
7666 bind(VECTOR64_LOOP);
7667 // AVX512 code to compare 64 byte vectors.
7668 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
7669 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
7670 kortestql(k7, k7);
7671 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
7672 addq(result, 64);
7673 subq(length, 64);
7674 jccb(Assembler::notZero, VECTOR64_LOOP);
7675
7676 //bind(VECTOR64_TAIL);
7677 testq(tmp1, tmp1);
7678 jcc(Assembler::zero, SAME_TILL_END);
7679
7680 //bind(VECTOR64_TAIL);
7681 // AVX512 code to compare up to 63 byte vectors.
7682 mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
7683 shlxq(tmp2, tmp2, tmp1);
7684 notq(tmp2);
7685 kmovql(k3, tmp2);
7686
7687 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
7688 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
7689
7690 ktestql(k7, k3);
7691 jcc(Assembler::below, SAME_TILL_END); // not mismatch
7692
7693 bind(VECTOR64_NOT_EQUAL);
7694 kmovql(tmp1, k7);
7695 notq(tmp1);
7696 tzcntq(tmp1, tmp1);
7697 addq(result, tmp1);
7698 shrq(result);
7699 jmp(DONE);
7700 bind(VECTOR32_TAIL);
7701 }
7702
7703 cmpq(length, 8);
7704 jcc(Assembler::equal, VECTOR8_LOOP);
7705 jcc(Assembler::less, VECTOR4_TAIL);
7706
7707 if (UseAVX >= 2) {
7708 Label VECTOR16_TAIL, VECTOR32_LOOP;
7709
7710 cmpq(length, 16);
7711 jcc(Assembler::equal, VECTOR16_LOOP);
7712 jcc(Assembler::less, VECTOR8_LOOP);
7713
7714 cmpq(length, 32);
7715 jccb(Assembler::less, VECTOR16_TAIL);
7716
7717 subq(length, 32);
7718 bind(VECTOR32_LOOP);
7719 vmovdqu(rymm0, Address(obja, result));
7720 vmovdqu(rymm1, Address(objb, result));
7721 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
7722 vptest(rymm2, rymm2);
7723 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
7724 addq(result, 32);
7725 subq(length, 32);
7726 jcc(Assembler::greaterEqual, VECTOR32_LOOP);
7727 addq(length, 32);
7728 jcc(Assembler::equal, SAME_TILL_END);
7729 //falling through if less than 32 bytes left //close the branch here.
7730
7731 bind(VECTOR16_TAIL);
7732 cmpq(length, 16);
7733 jccb(Assembler::less, VECTOR8_TAIL);
7734 bind(VECTOR16_LOOP);
7735 movdqu(rymm0, Address(obja, result));
7736 movdqu(rymm1, Address(objb, result));
7737 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
7738 ptest(rymm2, rymm2);
7739 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7740 addq(result, 16);
7741 subq(length, 16);
7742 jcc(Assembler::equal, SAME_TILL_END);
7743 //falling through if less than 16 bytes left
7744 } else {//regular intrinsics
7745
7746 cmpq(length, 16);
7747 jccb(Assembler::less, VECTOR8_TAIL);
7748
7749 subq(length, 16);
7750 bind(VECTOR16_LOOP);
7751 movdqu(rymm0, Address(obja, result));
7752 movdqu(rymm1, Address(objb, result));
7753 pxor(rymm0, rymm1);
7754 ptest(rymm0, rymm0);
7755 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7756 addq(result, 16);
7757 subq(length, 16);
7758 jccb(Assembler::greaterEqual, VECTOR16_LOOP);
7759 addq(length, 16);
7760 jcc(Assembler::equal, SAME_TILL_END);
7761 //falling through if less than 16 bytes left
7762 }
7763
7764 bind(VECTOR8_TAIL);
7765 cmpq(length, 8);
7766 jccb(Assembler::less, VECTOR4_TAIL);
7767 bind(VECTOR8_LOOP);
7768 movq(tmp1, Address(obja, result));
7769 movq(tmp2, Address(objb, result));
7770 xorq(tmp1, tmp2);
7771 testq(tmp1, tmp1);
7772 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
7773 addq(result, 8);
7774 subq(length, 8);
7775 jcc(Assembler::equal, SAME_TILL_END);
7776 //falling through if less than 8 bytes left
7777
7778 bind(VECTOR4_TAIL);
7779 cmpq(length, 4);
7780 jccb(Assembler::less, BYTES_TAIL);
7781 bind(VECTOR4_LOOP);
7782 movl(tmp1, Address(obja, result));
7783 xorl(tmp1, Address(objb, result));
7784 testl(tmp1, tmp1);
7785 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
7786 addq(result, 4);
7787 subq(length, 4);
7788 jcc(Assembler::equal, SAME_TILL_END);
7789 //falling through if less than 4 bytes left
7790
7791 bind(BYTES_TAIL);
7792 bind(BYTES_LOOP);
7793 load_unsigned_byte(tmp1, Address(obja, result));
7794 load_unsigned_byte(tmp2, Address(objb, result));
7795 xorl(tmp1, tmp2);
7796 testl(tmp1, tmp1);
7797 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7798 decq(length);
7799 jcc(Assembler::zero, SAME_TILL_END);
7800 incq(result);
7801 load_unsigned_byte(tmp1, Address(obja, result));
7802 load_unsigned_byte(tmp2, Address(objb, result));
7803 xorl(tmp1, tmp2);
7804 testl(tmp1, tmp1);
7805 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7806 decq(length);
7807 jcc(Assembler::zero, SAME_TILL_END);
7808 incq(result);
7809 load_unsigned_byte(tmp1, Address(obja, result));
7810 load_unsigned_byte(tmp2, Address(objb, result));
7811 xorl(tmp1, tmp2);
7812 testl(tmp1, tmp1);
7813 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7814 jmp(SAME_TILL_END);
7815
7816 if (UseAVX >= 2) {
7817 bind(VECTOR32_NOT_EQUAL);
7818 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
7819 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
7820 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
7821 vpmovmskb(tmp1, rymm0);
7822 bsfq(tmp1, tmp1);
7823 addq(result, tmp1);
7824 shrq(result);
7825 jmp(DONE);
7826 }
7827
7828 bind(VECTOR16_NOT_EQUAL);
7829 if (UseAVX >= 2) {
7830 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
7831 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
7832 pxor(rymm0, rymm2);
7833 } else {
7834 pcmpeqb(rymm2, rymm2);
7835 pxor(rymm0, rymm1);
7836 pcmpeqb(rymm0, rymm1);
7837 pxor(rymm0, rymm2);
7838 }
7839 pmovmskb(tmp1, rymm0);
7840 bsfq(tmp1, tmp1);
7841 addq(result, tmp1);
7842 shrq(result);
7843 jmpb(DONE);
7844
7845 bind(VECTOR8_NOT_EQUAL);
7846 bind(VECTOR4_NOT_EQUAL);
7847 bsfq(tmp1, tmp1);
7848 shrq(tmp1, 3);
7849 addq(result, tmp1);
7850 bind(BYTES_NOT_EQUAL);
7851 shrq(result);
7852 jmpb(DONE);
7853
7854 bind(SAME_TILL_END);
7855 mov64(result, -1);
7856
7857 bind(DONE);
7858 }
7859
7860 //Helper functions for square_to_len()
7861
7862 /**
7863 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
7864 * Preserves x and z and modifies rest of the registers.
7865 */
7866 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7867 // Perform square and right shift by 1
7868 // Handle odd xlen case first, then for even xlen do the following
7869 // jlong carry = 0;
7870 // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
7871 // huge_128 product = x[j:j+1] * x[j:j+1];
7872 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
7873 // z[i+2:i+3] = (jlong)(product >>> 1);
7874 // carry = (jlong)product;
7875 // }
7876
7877 xorq(tmp5, tmp5); // carry
7878 xorq(rdxReg, rdxReg);
7879 xorl(tmp1, tmp1); // index for x
7880 xorl(tmp4, tmp4); // index for z
7881
7882 Label L_first_loop, L_first_loop_exit;
7883
7884 testl(xlen, 1);
7885 jccb(Assembler::zero, L_first_loop); //jump if xlen is even
7886
7887 // Square and right shift by 1 the odd element using 32 bit multiply
7888 movl(raxReg, Address(x, tmp1, Address::times_4, 0));
7889 imulq(raxReg, raxReg);
7890 shrq(raxReg, 1);
7891 adcq(tmp5, 0);
7892 movq(Address(z, tmp4, Address::times_4, 0), raxReg);
7893 incrementl(tmp1);
7894 addl(tmp4, 2);
7895
7896 // Square and right shift by 1 the rest using 64 bit multiply
7897 bind(L_first_loop);
7898 cmpptr(tmp1, xlen);
7899 jccb(Assembler::equal, L_first_loop_exit);
7900
7901 // Square
7902 movq(raxReg, Address(x, tmp1, Address::times_4, 0));
7903 rorq(raxReg, 32); // convert big-endian to little-endian
7904 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax
7905
7906 // Right shift by 1 and save carry
7907 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
7908 rcrq(rdxReg, 1);
7909 rcrq(raxReg, 1);
7910 adcq(tmp5, 0);
7911
7912 // Store result in z
7913 movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
7914 movq(Address(z, tmp4, Address::times_4, 8), raxReg);
7915
7916 // Update indices for x and z
7917 addl(tmp1, 2);
7918 addl(tmp4, 4);
7919 jmp(L_first_loop);
7920
7921 bind(L_first_loop_exit);
7922 }
7923
7924
7925 /**
7926 * Perform the following multiply add operation using BMI2 instructions
7927 * carry:sum = sum + op1*op2 + carry
7928 * op2 should be in rdx
7929 * op2 is preserved, all other registers are modified
7930 */
7931 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
7932 // assert op2 is rdx
7933 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
7934 addq(sum, carry);
7935 adcq(tmp2, 0);
7936 addq(sum, op1);
7937 adcq(tmp2, 0);
7938 movq(carry, tmp2);
7939 }
7940
7941 /**
7942 * Perform the following multiply add operation:
7943 * carry:sum = sum + op1*op2 + carry
7944 * Preserves op1, op2 and modifies rest of registers
7945 */
7946 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
7947 // rdx:rax = op1 * op2
7948 movq(raxReg, op2);
7949 mulq(op1);
7950
7951 // rdx:rax = sum + carry + rdx:rax
7952 addq(sum, carry);
7953 adcq(rdxReg, 0);
7954 addq(sum, raxReg);
7955 adcq(rdxReg, 0);
7956
7957 // carry:sum = rdx:sum
7958 movq(carry, rdxReg);
7959 }
7960
7961 /**
7962 * Add 64 bit long carry into z[] with carry propagation.
7963 * Preserves z and carry register values and modifies rest of registers.
7964 *
7965 */
7966 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
7967 Label L_fourth_loop, L_fourth_loop_exit;
7968
7969 movl(tmp1, 1);
7970 subl(zlen, 2);
7971 addq(Address(z, zlen, Address::times_4, 0), carry);
7972
7973 bind(L_fourth_loop);
7974 jccb(Assembler::carryClear, L_fourth_loop_exit);
7975 subl(zlen, 2);
7976 jccb(Assembler::negative, L_fourth_loop_exit);
7977 addq(Address(z, zlen, Address::times_4, 0), tmp1);
7978 jmp(L_fourth_loop);
7979 bind(L_fourth_loop_exit);
7980 }
7981
7982 /**
7983 * Shift z[] left by 1 bit.
7984 * Preserves x, len, z and zlen registers and modifies rest of the registers.
7985 *
7986 */
7987 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
7988
7989 Label L_fifth_loop, L_fifth_loop_exit;
7990
7991 // Fifth loop
7992 // Perform primitiveLeftShift(z, zlen, 1)
7993
7994 const Register prev_carry = tmp1;
7995 const Register new_carry = tmp4;
7996 const Register value = tmp2;
7997 const Register zidx = tmp3;
7998
7999 // int zidx, carry;
8000 // long value;
8001 // carry = 0;
8002 // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
8003 // (carry:value) = (z[i] << 1) | carry ;
8004 // z[i] = value;
8005 // }
8006
8007 movl(zidx, zlen);
8008 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
8009
8010 bind(L_fifth_loop);
8011 decl(zidx); // Use decl to preserve carry flag
8012 decl(zidx);
8013 jccb(Assembler::negative, L_fifth_loop_exit);
8014
8015 if (UseBMI2Instructions) {
8016 movq(value, Address(z, zidx, Address::times_4, 0));
8017 rclq(value, 1);
8018 rorxq(value, value, 32);
8019 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
8020 }
8021 else {
8022 // clear new_carry
8023 xorl(new_carry, new_carry);
8024
8025 // Shift z[i] by 1, or in previous carry and save new carry
8026 movq(value, Address(z, zidx, Address::times_4, 0));
8027 shlq(value, 1);
8028 adcl(new_carry, 0);
8029
8030 orq(value, prev_carry);
8031 rorq(value, 0x20);
8032 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
8033
8034 // Set previous carry = new carry
8035 movl(prev_carry, new_carry);
8036 }
8037 jmp(L_fifth_loop);
8038
8039 bind(L_fifth_loop_exit);
8040 }
8041
8042
8043 /**
8044 * Code for BigInteger::squareToLen() intrinsic
8045 *
8046 * rdi: x
8047 * rsi: len
8048 * r8: z
8049 * rcx: zlen
8050 * r12: tmp1
8051 * r13: tmp2
8052 * r14: tmp3
8053 * r15: tmp4
8054 * rbx: tmp5
8055 *
8056 */
8057 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8058
8059 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
8060 push(tmp1);
8061 push(tmp2);
8062 push(tmp3);
8063 push(tmp4);
8064 push(tmp5);
8065
8066 // First loop
8067 // Store the squares, right shifted one bit (i.e., divided by 2).
8068 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
8069
8070 // Add in off-diagonal sums.
8071 //
8072 // Second, third (nested) and fourth loops.
8073 // zlen +=2;
8074 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
8075 // carry = 0;
8076 // long op2 = x[xidx:xidx+1];
8077 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
8078 // k -= 2;
8079 // long op1 = x[j:j+1];
8080 // long sum = z[k:k+1];
8081 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
8082 // z[k:k+1] = sum;
8083 // }
8084 // add_one_64(z, k, carry, tmp_regs);
8085 // }
8086
8087 const Register carry = tmp5;
8088 const Register sum = tmp3;
8089 const Register op1 = tmp4;
8090 Register op2 = tmp2;
8091
8092 push(zlen);
8093 push(len);
8094 addl(zlen,2);
8095 bind(L_second_loop);
8096 xorq(carry, carry);
8097 subl(zlen, 4);
8098 subl(len, 2);
8099 push(zlen);
8100 push(len);
8101 cmpl(len, 0);
8102 jccb(Assembler::lessEqual, L_second_loop_exit);
8103
8104 // Multiply an array by one 64 bit long.
8105 if (UseBMI2Instructions) {
8106 op2 = rdxReg;
8107 movq(op2, Address(x, len, Address::times_4, 0));
8108 rorxq(op2, op2, 32);
8109 }
8110 else {
8111 movq(op2, Address(x, len, Address::times_4, 0));
8112 rorq(op2, 32);
8113 }
8114
8115 bind(L_third_loop);
8116 decrementl(len);
8117 jccb(Assembler::negative, L_third_loop_exit);
8118 decrementl(len);
8119 jccb(Assembler::negative, L_last_x);
8120
8121 movq(op1, Address(x, len, Address::times_4, 0));
8122 rorq(op1, 32);
8123
8124 bind(L_multiply);
8125 subl(zlen, 2);
8126 movq(sum, Address(z, zlen, Address::times_4, 0));
8127
8128 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
8129 if (UseBMI2Instructions) {
8130 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
8131 }
8132 else {
8133 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8134 }
8135
8136 movq(Address(z, zlen, Address::times_4, 0), sum);
8137
8138 jmp(L_third_loop);
8139 bind(L_third_loop_exit);
8140
8141 // Fourth loop
8142 // Add 64 bit long carry into z with carry propagation.
8143 // Uses offsetted zlen.
8144 add_one_64(z, zlen, carry, tmp1);
8145
8146 pop(len);
8147 pop(zlen);
8148 jmp(L_second_loop);
8149
8150 // Next infrequent code is moved outside loops.
8151 bind(L_last_x);
8152 movl(op1, Address(x, 0));
8153 jmp(L_multiply);
8154
8155 bind(L_second_loop_exit);
8156 pop(len);
8157 pop(zlen);
8158 pop(len);
8159 pop(zlen);
8160
8161 // Fifth loop
8162 // Shift z left 1 bit.
8163 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
8164
8165 // z[zlen-1] |= x[len-1] & 1;
8166 movl(tmp3, Address(x, len, Address::times_4, -4));
8167 andl(tmp3, 1);
8168 orl(Address(z, zlen, Address::times_4, -4), tmp3);
8169
8170 pop(tmp5);
8171 pop(tmp4);
8172 pop(tmp3);
8173 pop(tmp2);
8174 pop(tmp1);
8175 }
8176
8177 /**
8178 * Helper function for mul_add()
8179 * Multiply the in[] by int k and add to out[] starting at offset offs using
8180 * 128 bit by 32 bit multiply and return the carry in tmp5.
8181 * Only quad int aligned length of in[] is operated on in this function.
8182 * k is in rdxReg for BMI2Instructions, for others it is in tmp2.
8183 * This function preserves out, in and k registers.
8184 * len and offset point to the appropriate index in "in" & "out" correspondingly
8185 * tmp5 has the carry.
8186 * other registers are temporary and are modified.
8187 *
8188 */
8189 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
8190 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
8191 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8192
8193 Label L_first_loop, L_first_loop_exit;
8194
8195 movl(tmp1, len);
8196 shrl(tmp1, 2);
8197
8198 bind(L_first_loop);
8199 subl(tmp1, 1);
8200 jccb(Assembler::negative, L_first_loop_exit);
8201
8202 subl(len, 4);
8203 subl(offset, 4);
8204
8205 Register op2 = tmp2;
8206 const Register sum = tmp3;
8207 const Register op1 = tmp4;
8208 const Register carry = tmp5;
8209
8210 if (UseBMI2Instructions) {
8211 op2 = rdxReg;
8212 }
8213
8214 movq(op1, Address(in, len, Address::times_4, 8));
8215 rorq(op1, 32);
8216 movq(sum, Address(out, offset, Address::times_4, 8));
8217 rorq(sum, 32);
8218 if (UseBMI2Instructions) {
8219 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8220 }
8221 else {
8222 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8223 }
8224 // Store back in big endian from little endian
8225 rorq(sum, 0x20);
8226 movq(Address(out, offset, Address::times_4, 8), sum);
8227
8228 movq(op1, Address(in, len, Address::times_4, 0));
8229 rorq(op1, 32);
8230 movq(sum, Address(out, offset, Address::times_4, 0));
8231 rorq(sum, 32);
8232 if (UseBMI2Instructions) {
8233 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8234 }
8235 else {
8236 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8237 }
8238 // Store back in big endian from little endian
8239 rorq(sum, 0x20);
8240 movq(Address(out, offset, Address::times_4, 0), sum);
8241
8242 jmp(L_first_loop);
8243 bind(L_first_loop_exit);
8244 }
8245
8246 /**
8247 * Code for BigInteger::mulAdd() intrinsic
8248 *
8249 * rdi: out
8250 * rsi: in
8251 * r11: offs (out.length - offset)
8252 * rcx: len
8253 * r8: k
8254 * r12: tmp1
8255 * r13: tmp2
8256 * r14: tmp3
8257 * r15: tmp4
8258 * rbx: tmp5
8259 * Multiply the in[] by word k and add to out[], return the carry in rax
8260 */
8261 void MacroAssembler::mul_add(Register out, Register in, Register offs,
8262 Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
8263 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8264
8265 Label L_carry, L_last_in, L_done;
8266
8267 // carry = 0;
8268 // for (int j=len-1; j >= 0; j--) {
8269 // long product = (in[j] & LONG_MASK) * kLong +
8270 // (out[offs] & LONG_MASK) + carry;
8271 // out[offs--] = (int)product;
8272 // carry = product >>> 32;
8273 // }
8274 //
8275 push(tmp1);
8276 push(tmp2);
8277 push(tmp3);
8278 push(tmp4);
8279 push(tmp5);
8280
8281 Register op2 = tmp2;
8282 const Register sum = tmp3;
8283 const Register op1 = tmp4;
8284 const Register carry = tmp5;
8285
8286 if (UseBMI2Instructions) {
8287 op2 = rdxReg;
8288 movl(op2, k);
8289 }
8290 else {
8291 movl(op2, k);
8292 }
8293
8294 xorq(carry, carry);
8295
8296 //First loop
8297
8298 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
8299 //The carry is in tmp5
8300 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
8301
8302 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any
8303 decrementl(len);
8304 jccb(Assembler::negative, L_carry);
8305 decrementl(len);
8306 jccb(Assembler::negative, L_last_in);
8307
8308 movq(op1, Address(in, len, Address::times_4, 0));
8309 rorq(op1, 32);
8310
8311 subl(offs, 2);
8312 movq(sum, Address(out, offs, Address::times_4, 0));
8313 rorq(sum, 32);
8314
8315 if (UseBMI2Instructions) {
8316 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8317 }
8318 else {
8319 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8320 }
8321
8322 // Store back in big endian from little endian
8323 rorq(sum, 0x20);
8324 movq(Address(out, offs, Address::times_4, 0), sum);
8325
8326 testl(len, len);
8327 jccb(Assembler::zero, L_carry);
8328
8329 //Multiply the last in[] entry, if any
8330 bind(L_last_in);
8331 movl(op1, Address(in, 0));
8332 movl(sum, Address(out, offs, Address::times_4, -4));
8333
8334 movl(raxReg, k);
8335 mull(op1); //tmp4 * eax -> edx:eax
8336 addl(sum, carry);
8337 adcl(rdxReg, 0);
8338 addl(sum, raxReg);
8339 adcl(rdxReg, 0);
8340 movl(carry, rdxReg);
8341
8342 movl(Address(out, offs, Address::times_4, -4), sum);
8343
8344 bind(L_carry);
8345 //return tmp5/carry as carry in rax
8346 movl(rax, carry);
8347
8348 bind(L_done);
8349 pop(tmp5);
8350 pop(tmp4);
8351 pop(tmp3);
8352 pop(tmp2);
8353 pop(tmp1);
8354 }
8355
8356 /**
8357 * Emits code to update CRC-32 with a byte value according to constants in table
8358 *
8359 * @param [in,out]crc Register containing the crc.
8360 * @param [in]val Register containing the byte to fold into the CRC.
8361 * @param [in]table Register containing the table of crc constants.
8362 *
8363 * uint32_t crc;
8364 * val = crc_table[(val ^ crc) & 0xFF];
8365 * crc = val ^ (crc >> 8);
8366 *
8367 */
8368 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
8369 xorl(val, crc);
8370 andl(val, 0xFF);
8371 shrl(crc, 8); // unsigned shift
8372 xorl(crc, Address(table, val, Address::times_4, 0));
8373 }
8374
8375 /**
8376 * Fold 128-bit data chunk
8377 */
8378 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
8379 if (UseAVX > 0) {
8380 vpclmulhdq(xtmp, xK, xcrc); // [123:64]
8381 vpclmulldq(xcrc, xK, xcrc); // [63:0]
8382 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
8383 pxor(xcrc, xtmp);
8384 } else {
8385 movdqa(xtmp, xcrc);
8386 pclmulhdq(xtmp, xK); // [123:64]
8387 pclmulldq(xcrc, xK); // [63:0]
8388 pxor(xcrc, xtmp);
8389 movdqu(xtmp, Address(buf, offset));
8390 pxor(xcrc, xtmp);
8391 }
8392 }
8393
8394 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
8395 if (UseAVX > 0) {
8396 vpclmulhdq(xtmp, xK, xcrc);
8397 vpclmulldq(xcrc, xK, xcrc);
8398 pxor(xcrc, xbuf);
8399 pxor(xcrc, xtmp);
8400 } else {
8401 movdqa(xtmp, xcrc);
8402 pclmulhdq(xtmp, xK);
8403 pclmulldq(xcrc, xK);
8404 pxor(xcrc, xbuf);
8405 pxor(xcrc, xtmp);
8406 }
8407 }
8408
8409 /**
8410 * 8-bit folds to compute 32-bit CRC
8411 *
8412 * uint64_t xcrc;
8413 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
8414 */
8415 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
8416 movdl(tmp, xcrc);
8417 andl(tmp, 0xFF);
8418 movdl(xtmp, Address(table, tmp, Address::times_4, 0));
8419 psrldq(xcrc, 1); // unsigned shift one byte
8420 pxor(xcrc, xtmp);
8421 }
8422
8423 /**
8424 * uint32_t crc;
8425 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
8426 */
8427 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
8428 movl(tmp, crc);
8429 andl(tmp, 0xFF);
8430 shrl(crc, 8);
8431 xorl(crc, Address(table, tmp, Address::times_4, 0));
8432 }
8433
8434 /**
8435 * @param crc register containing existing CRC (32-bit)
8436 * @param buf register pointing to input byte buffer (byte*)
8437 * @param len register containing number of bytes
8438 * @param table register that will contain address of CRC table
8439 * @param tmp scratch register
8440 */
8441 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
8442 assert_different_registers(crc, buf, len, table, tmp, rax);
8443
8444 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8445 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8446
8447 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8448 // context for the registers used, where all instructions below are using 128-bit mode
8449 // On EVEX without VL and BW, these instructions will all be AVX.
8450 lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
8451 notl(crc); // ~crc
8452 cmpl(len, 16);
8453 jcc(Assembler::less, L_tail);
8454
8455 // Align buffer to 16 bytes
8456 movl(tmp, buf);
8457 andl(tmp, 0xF);
8458 jccb(Assembler::zero, L_aligned);
8459 subl(tmp, 16);
8460 addl(len, tmp);
8461
8462 align(4);
8463 BIND(L_align_loop);
8464 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8465 update_byte_crc32(crc, rax, table);
8466 increment(buf);
8467 incrementl(tmp);
8468 jccb(Assembler::less, L_align_loop);
8469
8470 BIND(L_aligned);
8471 movl(tmp, len); // save
8472 shrl(len, 4);
8473 jcc(Assembler::zero, L_tail_restore);
8474
8475 // Fold crc into first bytes of vector
8476 movdqa(xmm1, Address(buf, 0));
8477 movdl(rax, xmm1);
8478 xorl(crc, rax);
8479 if (VM_Version::supports_sse4_1()) {
8480 pinsrd(xmm1, crc, 0);
8481 } else {
8482 pinsrw(xmm1, crc, 0);
8483 shrl(crc, 16);
8484 pinsrw(xmm1, crc, 1);
8485 }
8486 addptr(buf, 16);
8487 subl(len, 4); // len > 0
8488 jcc(Assembler::less, L_fold_tail);
8489
8490 movdqa(xmm2, Address(buf, 0));
8491 movdqa(xmm3, Address(buf, 16));
8492 movdqa(xmm4, Address(buf, 32));
8493 addptr(buf, 48);
8494 subl(len, 3);
8495 jcc(Assembler::lessEqual, L_fold_512b);
8496
8497 // Fold total 512 bits of polynomial on each iteration,
8498 // 128 bits per each of 4 parallel streams.
8499 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
8500
8501 align32();
8502 BIND(L_fold_512b_loop);
8503 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8504 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
8505 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
8506 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
8507 addptr(buf, 64);
8508 subl(len, 4);
8509 jcc(Assembler::greater, L_fold_512b_loop);
8510
8511 // Fold 512 bits to 128 bits.
8512 BIND(L_fold_512b);
8513 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8514 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
8515 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
8516 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
8517
8518 // Fold the rest of 128 bits data chunks
8519 BIND(L_fold_tail);
8520 addl(len, 3);
8521 jccb(Assembler::lessEqual, L_fold_128b);
8522 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8523
8524 BIND(L_fold_tail_loop);
8525 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8526 addptr(buf, 16);
8527 decrementl(len);
8528 jccb(Assembler::greater, L_fold_tail_loop);
8529
8530 // Fold 128 bits in xmm1 down into 32 bits in crc register.
8531 BIND(L_fold_128b);
8532 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
8533 if (UseAVX > 0) {
8534 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8535 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
8536 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8537 } else {
8538 movdqa(xmm2, xmm0);
8539 pclmulqdq(xmm2, xmm1, 0x1);
8540 movdqa(xmm3, xmm0);
8541 pand(xmm3, xmm2);
8542 pclmulqdq(xmm0, xmm3, 0x1);
8543 }
8544 psrldq(xmm1, 8);
8545 psrldq(xmm2, 4);
8546 pxor(xmm0, xmm1);
8547 pxor(xmm0, xmm2);
8548
8549 // 8 8-bit folds to compute 32-bit CRC.
8550 for (int j = 0; j < 4; j++) {
8551 fold_8bit_crc32(xmm0, table, xmm1, rax);
8552 }
8553 movdl(crc, xmm0); // mov 32 bits to general register
8554 for (int j = 0; j < 4; j++) {
8555 fold_8bit_crc32(crc, table, rax);
8556 }
8557
8558 BIND(L_tail_restore);
8559 movl(len, tmp); // restore
8560 BIND(L_tail);
8561 andl(len, 0xf);
8562 jccb(Assembler::zero, L_exit);
8563
8564 // Fold the rest of bytes
8565 align(4);
8566 BIND(L_tail_loop);
8567 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8568 update_byte_crc32(crc, rax, table);
8569 increment(buf);
8570 decrementl(len);
8571 jccb(Assembler::greater, L_tail_loop);
8572
8573 BIND(L_exit);
8574 notl(crc); // ~c
8575 }
8576
8577 // Helper function for AVX 512 CRC32
8578 // Fold 512-bit data chunks
8579 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
8580 Register pos, int offset) {
8581 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
8582 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
8583 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
8584 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
8585 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
8586 }
8587
8588 // Helper function for AVX 512 CRC32
8589 // Compute CRC32 for < 256B buffers
8590 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
8591 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
8592 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
8593
8594 Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
8595 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
8596 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
8597
8598 // check if there is enough buffer to be able to fold 16B at a time
8599 cmpl(len, 32);
8600 jcc(Assembler::less, L_less_than_32);
8601
8602 // if there is, load the constants
8603 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10
8604 movdl(xmm0, crc); // get the initial crc value
8605 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8606 pxor(xmm7, xmm0);
8607
8608 // update the buffer pointer
8609 addl(pos, 16);
8610 //update the counter.subtract 32 instead of 16 to save one instruction from the loop
8611 subl(len, 32);
8612 jmp(L_16B_reduction_loop);
8613
8614 bind(L_less_than_32);
8615 //mov initial crc to the return value. this is necessary for zero - length buffers.
8616 movl(rax, crc);
8617 testl(len, len);
8618 jcc(Assembler::equal, L_cleanup);
8619
8620 movdl(xmm0, crc); //get the initial crc value
8621
8622 cmpl(len, 16);
8623 jcc(Assembler::equal, L_exact_16_left);
8624 jcc(Assembler::less, L_less_than_16_left);
8625
8626 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8627 pxor(xmm7, xmm0); //xor the initial crc value
8628 addl(pos, 16);
8629 subl(len, 16);
8630 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
8631 jmp(L_get_last_two_xmms);
8632
8633 bind(L_less_than_16_left);
8634 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first.
8635 pxor(xmm1, xmm1);
8636 movptr(tmp1, rsp);
8637 movdqu(Address(tmp1, 0 * 16), xmm1);
8638
8639 cmpl(len, 4);
8640 jcc(Assembler::less, L_only_less_than_4);
8641
8642 //backup the counter value
8643 movl(tmp2, len);
8644 cmpl(len, 8);
8645 jcc(Assembler::less, L_less_than_8_left);
8646
8647 //load 8 Bytes
8648 movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
8649 movq(Address(tmp1, 0 * 16), rax);
8650 addptr(tmp1, 8);
8651 subl(len, 8);
8652 addl(pos, 8);
8653
8654 bind(L_less_than_8_left);
8655 cmpl(len, 4);
8656 jcc(Assembler::less, L_less_than_4_left);
8657
8658 //load 4 Bytes
8659 movl(rax, Address(buf, pos, Address::times_1, 0));
8660 movl(Address(tmp1, 0 * 16), rax);
8661 addptr(tmp1, 4);
8662 subl(len, 4);
8663 addl(pos, 4);
8664
8665 bind(L_less_than_4_left);
8666 cmpl(len, 2);
8667 jcc(Assembler::less, L_less_than_2_left);
8668
8669 // load 2 Bytes
8670 movw(rax, Address(buf, pos, Address::times_1, 0));
8671 movl(Address(tmp1, 0 * 16), rax);
8672 addptr(tmp1, 2);
8673 subl(len, 2);
8674 addl(pos, 2);
8675
8676 bind(L_less_than_2_left);
8677 cmpl(len, 1);
8678 jcc(Assembler::less, L_zero_left);
8679
8680 // load 1 Byte
8681 movb(rax, Address(buf, pos, Address::times_1, 0));
8682 movb(Address(tmp1, 0 * 16), rax);
8683
8684 bind(L_zero_left);
8685 movdqu(xmm7, Address(rsp, 0));
8686 pxor(xmm7, xmm0); //xor the initial crc value
8687
8688 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8689 movdqu(xmm0, Address(rax, tmp2));
8690 pshufb(xmm7, xmm0);
8691 jmp(L_128_done);
8692
8693 bind(L_exact_16_left);
8694 movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
8695 pxor(xmm7, xmm0); //xor the initial crc value
8696 jmp(L_128_done);
8697
8698 bind(L_only_less_than_4);
8699 cmpl(len, 3);
8700 jcc(Assembler::less, L_only_less_than_3);
8701
8702 // load 3 Bytes
8703 movb(rax, Address(buf, pos, Address::times_1, 0));
8704 movb(Address(tmp1, 0), rax);
8705
8706 movb(rax, Address(buf, pos, Address::times_1, 1));
8707 movb(Address(tmp1, 1), rax);
8708
8709 movb(rax, Address(buf, pos, Address::times_1, 2));
8710 movb(Address(tmp1, 2), rax);
8711
8712 movdqu(xmm7, Address(rsp, 0));
8713 pxor(xmm7, xmm0); //xor the initial crc value
8714
8715 pslldq(xmm7, 0x5);
8716 jmp(L_barrett);
8717 bind(L_only_less_than_3);
8718 cmpl(len, 2);
8719 jcc(Assembler::less, L_only_less_than_2);
8720
8721 // load 2 Bytes
8722 movb(rax, Address(buf, pos, Address::times_1, 0));
8723 movb(Address(tmp1, 0), rax);
8724
8725 movb(rax, Address(buf, pos, Address::times_1, 1));
8726 movb(Address(tmp1, 1), rax);
8727
8728 movdqu(xmm7, Address(rsp, 0));
8729 pxor(xmm7, xmm0); //xor the initial crc value
8730
8731 pslldq(xmm7, 0x6);
8732 jmp(L_barrett);
8733
8734 bind(L_only_less_than_2);
8735 //load 1 Byte
8736 movb(rax, Address(buf, pos, Address::times_1, 0));
8737 movb(Address(tmp1, 0), rax);
8738
8739 movdqu(xmm7, Address(rsp, 0));
8740 pxor(xmm7, xmm0); //xor the initial crc value
8741
8742 pslldq(xmm7, 0x7);
8743 }
8744
8745 /**
8746 * Compute CRC32 using AVX512 instructions
8747 * param crc register containing existing CRC (32-bit)
8748 * param buf register pointing to input byte buffer (byte*)
8749 * param len register containing number of bytes
8750 * param table address of crc or crc32c table
8751 * param tmp1 scratch register
8752 * param tmp2 scratch register
8753 * return rax result register
8754 *
8755 * This routine is identical for crc32c with the exception of the precomputed constant
8756 * table which will be passed as the table argument. The calculation steps are
8757 * the same for both variants.
8758 */
8759 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
8760 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
8761
8762 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8763 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8764 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
8765 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
8766 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
8767
8768 const Register pos = r12;
8769 push(r12);
8770 subptr(rsp, 16 * 2 + 8);
8771
8772 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8773 // context for the registers used, where all instructions below are using 128-bit mode
8774 // On EVEX without VL and BW, these instructions will all be AVX.
8775 movl(pos, 0);
8776
8777 // check if smaller than 256B
8778 cmpl(len, 256);
8779 jcc(Assembler::less, L_less_than_256);
8780
8781 // load the initial crc value
8782 movdl(xmm10, crc);
8783
8784 // receive the initial 64B data, xor the initial crc value
8785 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
8786 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
8787 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
8788 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
8789
8790 subl(len, 256);
8791 cmpl(len, 256);
8792 jcc(Assembler::less, L_fold_128_B_loop);
8793
8794 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
8795 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
8796 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
8797 subl(len, 256);
8798
8799 bind(L_fold_256_B_loop);
8800 addl(pos, 256);
8801 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
8802 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
8803 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
8804 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
8805
8806 subl(len, 256);
8807 jcc(Assembler::greaterEqual, L_fold_256_B_loop);
8808
8809 // Fold 256 into 128
8810 addl(pos, 256);
8811 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
8812 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
8813 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
8814
8815 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
8816 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
8817 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
8818
8819 evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
8820 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
8821
8822 addl(len, 128);
8823 jmp(L_fold_128_B_register);
8824
8825 // at this section of the code, there is 128 * x + y(0 <= y<128) bytes of buffer.The fold_128_B_loop
8826 // loop will fold 128B at a time until we have 128 + y Bytes of buffer
8827
8828 // fold 128B at a time.This section of the code folds 8 xmm registers in parallel
8829 bind(L_fold_128_B_loop);
8830 addl(pos, 128);
8831 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
8832 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
8833
8834 subl(len, 128);
8835 jcc(Assembler::greaterEqual, L_fold_128_B_loop);
8836
8837 addl(pos, 128);
8838
8839 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
8840 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
8841 bind(L_fold_128_B_register);
8842 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
8843 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
8844 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
8845 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
8846 // save last that has no multiplicand
8847 vextracti64x2(xmm7, xmm4, 3);
8848
8849 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
8850 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
8851 // Needed later in reduction loop
8852 movdqu(xmm10, Address(table, 1 * 16));
8853 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
8854 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
8855
8856 // Swap 1,0,3,2 - 01 00 11 10
8857 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
8858 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
8859 vextracti128(xmm5, xmm8, 1);
8860 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
8861
8862 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
8863 // instead of a cmp instruction, we use the negative flag with the jl instruction
8864 addl(len, 128 - 16);
8865 jcc(Assembler::less, L_final_reduction_for_128);
8866
8867 bind(L_16B_reduction_loop);
8868 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8869 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8870 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8871 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
8872 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8873 addl(pos, 16);
8874 subl(len, 16);
8875 jcc(Assembler::greaterEqual, L_16B_reduction_loop);
8876
8877 bind(L_final_reduction_for_128);
8878 addl(len, 16);
8879 jcc(Assembler::equal, L_128_done);
8880
8881 bind(L_get_last_two_xmms);
8882 movdqu(xmm2, xmm7);
8883 addl(pos, len);
8884 movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
8885 subl(pos, len);
8886
8887 // get rid of the extra data that was loaded before
8888 // load the shift constant
8889 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8890 movdqu(xmm0, Address(rax, len));
8891 addl(rax, len);
8892
8893 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8894 //Change mask to 512
8895 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
8896 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
8897
8898 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
8899 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8900 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8901 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8902 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
8903
8904 bind(L_128_done);
8905 // compute crc of a 128-bit value
8906 movdqu(xmm10, Address(table, 3 * 16));
8907 movdqu(xmm0, xmm7);
8908
8909 // 64b fold
8910 vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
8911 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
8912 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8913
8914 // 32b fold
8915 movdqu(xmm0, xmm7);
8916 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
8917 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8918 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8919 jmp(L_barrett);
8920
8921 bind(L_less_than_256);
8922 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
8923
8924 //barrett reduction
8925 bind(L_barrett);
8926 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
8927 movdqu(xmm1, xmm7);
8928 movdqu(xmm2, xmm7);
8929 movdqu(xmm10, Address(table, 4 * 16));
8930
8931 pclmulqdq(xmm7, xmm10, 0x0);
8932 pxor(xmm7, xmm2);
8933 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
8934 movdqu(xmm2, xmm7);
8935 pclmulqdq(xmm7, xmm10, 0x10);
8936 pxor(xmm7, xmm2);
8937 pxor(xmm7, xmm1);
8938 pextrd(crc, xmm7, 2);
8939
8940 bind(L_cleanup);
8941 addptr(rsp, 16 * 2 + 8);
8942 pop(r12);
8943 }
8944
8945 // S. Gueron / Information Processing Letters 112 (2012) 184
8946 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
8947 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
8948 // Output: the 64-bit carry-less product of B * CONST
8949 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
8950 Register tmp1, Register tmp2, Register tmp3) {
8951 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
8952 if (n > 0) {
8953 addq(tmp3, n * 256 * 8);
8954 }
8955 // Q1 = TABLEExt[n][B & 0xFF];
8956 movl(tmp1, in);
8957 andl(tmp1, 0x000000FF);
8958 shll(tmp1, 3);
8959 addq(tmp1, tmp3);
8960 movq(tmp1, Address(tmp1, 0));
8961
8962 // Q2 = TABLEExt[n][B >> 8 & 0xFF];
8963 movl(tmp2, in);
8964 shrl(tmp2, 8);
8965 andl(tmp2, 0x000000FF);
8966 shll(tmp2, 3);
8967 addq(tmp2, tmp3);
8968 movq(tmp2, Address(tmp2, 0));
8969
8970 shlq(tmp2, 8);
8971 xorq(tmp1, tmp2);
8972
8973 // Q3 = TABLEExt[n][B >> 16 & 0xFF];
8974 movl(tmp2, in);
8975 shrl(tmp2, 16);
8976 andl(tmp2, 0x000000FF);
8977 shll(tmp2, 3);
8978 addq(tmp2, tmp3);
8979 movq(tmp2, Address(tmp2, 0));
8980
8981 shlq(tmp2, 16);
8982 xorq(tmp1, tmp2);
8983
8984 // Q4 = TABLEExt[n][B >> 24 & 0xFF];
8985 shrl(in, 24);
8986 andl(in, 0x000000FF);
8987 shll(in, 3);
8988 addq(in, tmp3);
8989 movq(in, Address(in, 0));
8990
8991 shlq(in, 24);
8992 xorq(in, tmp1);
8993 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
8994 }
8995
8996 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
8997 Register in_out,
8998 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
8999 XMMRegister w_xtmp2,
9000 Register tmp1,
9001 Register n_tmp2, Register n_tmp3) {
9002 if (is_pclmulqdq_supported) {
9003 movdl(w_xtmp1, in_out); // modified blindly
9004
9005 movl(tmp1, const_or_pre_comp_const_index);
9006 movdl(w_xtmp2, tmp1);
9007 pclmulqdq(w_xtmp1, w_xtmp2, 0);
9008
9009 movdq(in_out, w_xtmp1);
9010 } else {
9011 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
9012 }
9013 }
9014
9015 // Recombination Alternative 2: No bit-reflections
9016 // T1 = (CRC_A * U1) << 1
9017 // T2 = (CRC_B * U2) << 1
9018 // C1 = T1 >> 32
9019 // C2 = T2 >> 32
9020 // T1 = T1 & 0xFFFFFFFF
9021 // T2 = T2 & 0xFFFFFFFF
9022 // T1 = CRC32(0, T1)
9023 // T2 = CRC32(0, T2)
9024 // C1 = C1 ^ T1
9025 // C2 = C2 ^ T2
9026 // CRC = C1 ^ C2 ^ CRC_C
9027 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
9028 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9029 Register tmp1, Register tmp2,
9030 Register n_tmp3) {
9031 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
9032 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
9033 shlq(in_out, 1);
9034 movl(tmp1, in_out);
9035 shrq(in_out, 32);
9036 xorl(tmp2, tmp2);
9037 crc32(tmp2, tmp1, 4);
9038 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
9039 shlq(in1, 1);
9040 movl(tmp1, in1);
9041 shrq(in1, 32);
9042 xorl(tmp2, tmp2);
9043 crc32(tmp2, tmp1, 4);
9044 xorl(in1, tmp2);
9045 xorl(in_out, in1);
9046 xorl(in_out, in2);
9047 }
9048
9049 // Set N to predefined value
9050 // Subtract from a length of a buffer
9051 // execute in a loop:
9052 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
9053 // for i = 1 to N do
9054 // CRC_A = CRC32(CRC_A, A[i])
9055 // CRC_B = CRC32(CRC_B, B[i])
9056 // CRC_C = CRC32(CRC_C, C[i])
9057 // end for
9058 // Recombine
9059 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
9060 Register in_out1, Register in_out2, Register in_out3,
9061 Register tmp1, Register tmp2, Register tmp3,
9062 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9063 Register tmp4, Register tmp5,
9064 Register n_tmp6) {
9065 Label L_processPartitions;
9066 Label L_processPartition;
9067 Label L_exit;
9068
9069 bind(L_processPartitions);
9070 cmpl(in_out1, 3 * size);
9071 jcc(Assembler::less, L_exit);
9072 xorl(tmp1, tmp1);
9073 xorl(tmp2, tmp2);
9074 movq(tmp3, in_out2);
9075 addq(tmp3, size);
9076
9077 bind(L_processPartition);
9078 crc32(in_out3, Address(in_out2, 0), 8);
9079 crc32(tmp1, Address(in_out2, size), 8);
9080 crc32(tmp2, Address(in_out2, size * 2), 8);
9081 addq(in_out2, 8);
9082 cmpq(in_out2, tmp3);
9083 jcc(Assembler::less, L_processPartition);
9084 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
9085 w_xtmp1, w_xtmp2, w_xtmp3,
9086 tmp4, tmp5,
9087 n_tmp6);
9088 addq(in_out2, 2 * size);
9089 subl(in_out1, 3 * size);
9090 jmp(L_processPartitions);
9091
9092 bind(L_exit);
9093 }
9094
9095 // Algorithm 2: Pipelined usage of the CRC32 instruction.
9096 // Input: A buffer I of L bytes.
9097 // Output: the CRC32C value of the buffer.
9098 // Notations:
9099 // Write L = 24N + r, with N = floor (L/24).
9100 // r = L mod 24 (0 <= r < 24).
9101 // Consider I as the concatenation of A|B|C|R, where A, B, C, each,
9102 // N quadwords, and R consists of r bytes.
9103 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
9104 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
9105 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
9106 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
9107 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
9108 Register tmp1, Register tmp2, Register tmp3,
9109 Register tmp4, Register tmp5, Register tmp6,
9110 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9111 bool is_pclmulqdq_supported) {
9112 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
9113 Label L_wordByWord;
9114 Label L_byteByByteProlog;
9115 Label L_byteByByte;
9116 Label L_exit;
9117
9118 if (is_pclmulqdq_supported ) {
9119 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
9120 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
9121
9122 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
9123 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
9124
9125 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
9126 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
9127 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
9128 } else {
9129 const_or_pre_comp_const_index[0] = 1;
9130 const_or_pre_comp_const_index[1] = 0;
9131
9132 const_or_pre_comp_const_index[2] = 3;
9133 const_or_pre_comp_const_index[3] = 2;
9134
9135 const_or_pre_comp_const_index[4] = 5;
9136 const_or_pre_comp_const_index[5] = 4;
9137 }
9138 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
9139 in2, in1, in_out,
9140 tmp1, tmp2, tmp3,
9141 w_xtmp1, w_xtmp2, w_xtmp3,
9142 tmp4, tmp5,
9143 tmp6);
9144 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
9145 in2, in1, in_out,
9146 tmp1, tmp2, tmp3,
9147 w_xtmp1, w_xtmp2, w_xtmp3,
9148 tmp4, tmp5,
9149 tmp6);
9150 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
9151 in2, in1, in_out,
9152 tmp1, tmp2, tmp3,
9153 w_xtmp1, w_xtmp2, w_xtmp3,
9154 tmp4, tmp5,
9155 tmp6);
9156 movl(tmp1, in2);
9157 andl(tmp1, 0x00000007);
9158 negl(tmp1);
9159 addl(tmp1, in2);
9160 addq(tmp1, in1);
9161
9162 cmpq(in1, tmp1);
9163 jccb(Assembler::greaterEqual, L_byteByByteProlog);
9164 align(16);
9165 BIND(L_wordByWord);
9166 crc32(in_out, Address(in1, 0), 8);
9167 addq(in1, 8);
9168 cmpq(in1, tmp1);
9169 jcc(Assembler::less, L_wordByWord);
9170
9171 BIND(L_byteByByteProlog);
9172 andl(in2, 0x00000007);
9173 movl(tmp2, 1);
9174
9175 cmpl(tmp2, in2);
9176 jccb(Assembler::greater, L_exit);
9177 BIND(L_byteByByte);
9178 crc32(in_out, Address(in1, 0), 1);
9179 incq(in1);
9180 incl(tmp2);
9181 cmpl(tmp2, in2);
9182 jcc(Assembler::lessEqual, L_byteByByte);
9183
9184 BIND(L_exit);
9185 }
9186 #undef BIND
9187 #undef BLOCK_COMMENT
9188
9189 // Compress char[] array to byte[].
9190 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
9191 // Return the array length if every element in array can be encoded,
9192 // otherwise, the index of first non-latin1 (> 0xff) character.
9193 // @IntrinsicCandidate
9194 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
9195 // for (int i = 0; i < len; i++) {
9196 // char c = src[srcOff];
9197 // if (c > 0xff) {
9198 // return i; // return index of non-latin1 char
9199 // }
9200 // dst[dstOff] = (byte)c;
9201 // srcOff++;
9202 // dstOff++;
9203 // }
9204 // return len;
9205 // }
9206 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
9207 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
9208 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
9209 Register tmp5, Register result, KRegister mask1, KRegister mask2) {
9210 Label copy_chars_loop, done, reset_sp, copy_tail;
9211
9212 // rsi: src
9213 // rdi: dst
9214 // rdx: len
9215 // rcx: tmp5
9216 // rax: result
9217
9218 // rsi holds start addr of source char[] to be compressed
9219 // rdi holds start addr of destination byte[]
9220 // rdx holds length
9221
9222 assert(len != result, "");
9223
9224 // save length for return
9225 movl(result, len);
9226
9227 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
9228 VM_Version::supports_avx512vlbw() &&
9229 VM_Version::supports_bmi2()) {
9230
9231 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
9232
9233 // alignment
9234 Label post_alignment;
9235
9236 // if length of the string is less than 32, handle it the old fashioned way
9237 testl(len, -32);
9238 jcc(Assembler::zero, below_threshold);
9239
9240 // First check whether a character is compressible ( <= 0xFF).
9241 // Create mask to test for Unicode chars inside zmm vector
9242 movl(tmp5, 0x00FF);
9243 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
9244
9245 testl(len, -64);
9246 jccb(Assembler::zero, post_alignment);
9247
9248 movl(tmp5, dst);
9249 andl(tmp5, (32 - 1));
9250 negl(tmp5);
9251 andl(tmp5, (32 - 1));
9252
9253 // bail out when there is nothing to be done
9254 testl(tmp5, 0xFFFFFFFF);
9255 jccb(Assembler::zero, post_alignment);
9256
9257 // ~(~0 << len), where len is the # of remaining elements to process
9258 movl(len, 0xFFFFFFFF);
9259 shlxl(len, len, tmp5);
9260 notl(len);
9261 kmovdl(mask2, len);
9262 movl(len, result);
9263
9264 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9265 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9266 ktestd(mask1, mask2);
9267 jcc(Assembler::carryClear, copy_tail);
9268
9269 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9270
9271 addptr(src, tmp5);
9272 addptr(src, tmp5);
9273 addptr(dst, tmp5);
9274 subl(len, tmp5);
9275
9276 bind(post_alignment);
9277 // end of alignment
9278
9279 movl(tmp5, len);
9280 andl(tmp5, (32 - 1)); // tail count (in chars)
9281 andl(len, ~(32 - 1)); // vector count (in chars)
9282 jccb(Assembler::zero, copy_loop_tail);
9283
9284 lea(src, Address(src, len, Address::times_2));
9285 lea(dst, Address(dst, len, Address::times_1));
9286 negptr(len);
9287
9288 bind(copy_32_loop);
9289 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
9290 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
9291 kortestdl(mask1, mask1);
9292 jccb(Assembler::carryClear, reset_for_copy_tail);
9293
9294 // All elements in current processed chunk are valid candidates for
9295 // compression. Write a truncated byte elements to the memory.
9296 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
9297 addptr(len, 32);
9298 jccb(Assembler::notZero, copy_32_loop);
9299
9300 bind(copy_loop_tail);
9301 // bail out when there is nothing to be done
9302 testl(tmp5, 0xFFFFFFFF);
9303 jcc(Assembler::zero, done);
9304
9305 movl(len, tmp5);
9306
9307 // ~(~0 << len), where len is the # of remaining elements to process
9308 movl(tmp5, 0xFFFFFFFF);
9309 shlxl(tmp5, tmp5, len);
9310 notl(tmp5);
9311
9312 kmovdl(mask2, tmp5);
9313
9314 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9315 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9316 ktestd(mask1, mask2);
9317 jcc(Assembler::carryClear, copy_tail);
9318
9319 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9320 jmp(done);
9321
9322 bind(reset_for_copy_tail);
9323 lea(src, Address(src, tmp5, Address::times_2));
9324 lea(dst, Address(dst, tmp5, Address::times_1));
9325 subptr(len, tmp5);
9326 jmp(copy_chars_loop);
9327
9328 bind(below_threshold);
9329 }
9330
9331 if (UseSSE42Intrinsics) {
9332 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
9333
9334 // vectored compression
9335 testl(len, 0xfffffff8);
9336 jcc(Assembler::zero, copy_tail);
9337
9338 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors
9339 movdl(tmp1Reg, tmp5);
9340 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg
9341
9342 andl(len, 0xfffffff0);
9343 jccb(Assembler::zero, copy_16);
9344
9345 // compress 16 chars per iter
9346 pxor(tmp4Reg, tmp4Reg);
9347
9348 lea(src, Address(src, len, Address::times_2));
9349 lea(dst, Address(dst, len, Address::times_1));
9350 negptr(len);
9351
9352 bind(copy_32_loop);
9353 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters
9354 por(tmp4Reg, tmp2Reg);
9355 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
9356 por(tmp4Reg, tmp3Reg);
9357 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector
9358 jccb(Assembler::notZero, reset_for_copy_tail);
9359 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte
9360 movdqu(Address(dst, len, Address::times_1), tmp2Reg);
9361 addptr(len, 16);
9362 jccb(Assembler::notZero, copy_32_loop);
9363
9364 // compress next vector of 8 chars (if any)
9365 bind(copy_16);
9366 // len = 0
9367 testl(result, 0x00000008); // check if there's a block of 8 chars to compress
9368 jccb(Assembler::zero, copy_tail_sse);
9369
9370 pxor(tmp3Reg, tmp3Reg);
9371
9372 movdqu(tmp2Reg, Address(src, 0));
9373 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
9374 jccb(Assembler::notZero, reset_for_copy_tail);
9375 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
9376 movq(Address(dst, 0), tmp2Reg);
9377 addptr(src, 16);
9378 addptr(dst, 8);
9379 jmpb(copy_tail_sse);
9380
9381 bind(reset_for_copy_tail);
9382 movl(tmp5, result);
9383 andl(tmp5, 0x0000000f);
9384 lea(src, Address(src, tmp5, Address::times_2));
9385 lea(dst, Address(dst, tmp5, Address::times_1));
9386 subptr(len, tmp5);
9387 jmpb(copy_chars_loop);
9388
9389 bind(copy_tail_sse);
9390 movl(len, result);
9391 andl(len, 0x00000007); // tail count (in chars)
9392 }
9393 // compress 1 char per iter
9394 bind(copy_tail);
9395 testl(len, len);
9396 jccb(Assembler::zero, done);
9397 lea(src, Address(src, len, Address::times_2));
9398 lea(dst, Address(dst, len, Address::times_1));
9399 negptr(len);
9400
9401 bind(copy_chars_loop);
9402 load_unsigned_short(tmp5, Address(src, len, Address::times_2));
9403 testl(tmp5, 0xff00); // check if Unicode char
9404 jccb(Assembler::notZero, reset_sp);
9405 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte
9406 increment(len);
9407 jccb(Assembler::notZero, copy_chars_loop);
9408
9409 // add len then return (len will be zero if compress succeeded, otherwise negative)
9410 bind(reset_sp);
9411 addl(result, len);
9412
9413 bind(done);
9414 }
9415
9416 // Inflate byte[] array to char[].
9417 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
9418 // @IntrinsicCandidate
9419 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
9420 // for (int i = 0; i < len; i++) {
9421 // dst[dstOff++] = (char)(src[srcOff++] & 0xff);
9422 // }
9423 // }
9424 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
9425 XMMRegister tmp1, Register tmp2, KRegister mask) {
9426 Label copy_chars_loop, done, below_threshold, avx3_threshold;
9427 // rsi: src
9428 // rdi: dst
9429 // rdx: len
9430 // rcx: tmp2
9431
9432 // rsi holds start addr of source byte[] to be inflated
9433 // rdi holds start addr of destination char[]
9434 // rdx holds length
9435 assert_different_registers(src, dst, len, tmp2);
9436 movl(tmp2, len);
9437 if ((UseAVX > 2) && // AVX512
9438 VM_Version::supports_avx512vlbw() &&
9439 VM_Version::supports_bmi2()) {
9440
9441 Label copy_32_loop, copy_tail;
9442 Register tmp3_aliased = len;
9443
9444 // if length of the string is less than 16, handle it in an old fashioned way
9445 testl(len, -16);
9446 jcc(Assembler::zero, below_threshold);
9447
9448 testl(len, -1 * AVX3Threshold);
9449 jcc(Assembler::zero, avx3_threshold);
9450
9451 // In order to use only one arithmetic operation for the main loop we use
9452 // this pre-calculation
9453 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
9454 andl(len, -32); // vector count
9455 jccb(Assembler::zero, copy_tail);
9456
9457 lea(src, Address(src, len, Address::times_1));
9458 lea(dst, Address(dst, len, Address::times_2));
9459 negptr(len);
9460
9461
9462 // inflate 32 chars per iter
9463 bind(copy_32_loop);
9464 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
9465 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
9466 addptr(len, 32);
9467 jcc(Assembler::notZero, copy_32_loop);
9468
9469 bind(copy_tail);
9470 // bail out when there is nothing to be done
9471 testl(tmp2, -1); // we don't destroy the contents of tmp2 here
9472 jcc(Assembler::zero, done);
9473
9474 // ~(~0 << length), where length is the # of remaining elements to process
9475 movl(tmp3_aliased, -1);
9476 shlxl(tmp3_aliased, tmp3_aliased, tmp2);
9477 notl(tmp3_aliased);
9478 kmovdl(mask, tmp3_aliased);
9479 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
9480 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
9481
9482 jmp(done);
9483 bind(avx3_threshold);
9484 }
9485 if (UseSSE42Intrinsics) {
9486 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
9487
9488 if (UseAVX > 1) {
9489 andl(tmp2, (16 - 1));
9490 andl(len, -16);
9491 jccb(Assembler::zero, copy_new_tail);
9492 } else {
9493 andl(tmp2, 0x00000007); // tail count (in chars)
9494 andl(len, 0xfffffff8); // vector count (in chars)
9495 jccb(Assembler::zero, copy_tail);
9496 }
9497
9498 // vectored inflation
9499 lea(src, Address(src, len, Address::times_1));
9500 lea(dst, Address(dst, len, Address::times_2));
9501 negptr(len);
9502
9503 if (UseAVX > 1) {
9504 bind(copy_16_loop);
9505 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
9506 vmovdqu(Address(dst, len, Address::times_2), tmp1);
9507 addptr(len, 16);
9508 jcc(Assembler::notZero, copy_16_loop);
9509
9510 bind(below_threshold);
9511 bind(copy_new_tail);
9512 movl(len, tmp2);
9513 andl(tmp2, 0x00000007);
9514 andl(len, 0xFFFFFFF8);
9515 jccb(Assembler::zero, copy_tail);
9516
9517 pmovzxbw(tmp1, Address(src, 0));
9518 movdqu(Address(dst, 0), tmp1);
9519 addptr(src, 8);
9520 addptr(dst, 2 * 8);
9521
9522 jmp(copy_tail, true);
9523 }
9524
9525 // inflate 8 chars per iter
9526 bind(copy_8_loop);
9527 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words
9528 movdqu(Address(dst, len, Address::times_2), tmp1);
9529 addptr(len, 8);
9530 jcc(Assembler::notZero, copy_8_loop);
9531
9532 bind(copy_tail);
9533 movl(len, tmp2);
9534
9535 cmpl(len, 4);
9536 jccb(Assembler::less, copy_bytes);
9537
9538 movdl(tmp1, Address(src, 0)); // load 4 byte chars
9539 pmovzxbw(tmp1, tmp1);
9540 movq(Address(dst, 0), tmp1);
9541 subptr(len, 4);
9542 addptr(src, 4);
9543 addptr(dst, 8);
9544
9545 bind(copy_bytes);
9546 } else {
9547 bind(below_threshold);
9548 }
9549
9550 testl(len, len);
9551 jccb(Assembler::zero, done);
9552 lea(src, Address(src, len, Address::times_1));
9553 lea(dst, Address(dst, len, Address::times_2));
9554 negptr(len);
9555
9556 // inflate 1 char per iter
9557 bind(copy_chars_loop);
9558 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char
9559 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word
9560 increment(len);
9561 jcc(Assembler::notZero, copy_chars_loop);
9562
9563 bind(done);
9564 }
9565
9566 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
9567 switch(type) {
9568 case T_BYTE:
9569 case T_BOOLEAN:
9570 evmovdqub(dst, kmask, src, merge, vector_len);
9571 break;
9572 case T_CHAR:
9573 case T_SHORT:
9574 evmovdquw(dst, kmask, src, merge, vector_len);
9575 break;
9576 case T_INT:
9577 case T_FLOAT:
9578 evmovdqul(dst, kmask, src, merge, vector_len);
9579 break;
9580 case T_LONG:
9581 case T_DOUBLE:
9582 evmovdquq(dst, kmask, src, merge, vector_len);
9583 break;
9584 default:
9585 fatal("Unexpected type argument %s", type2name(type));
9586 break;
9587 }
9588 }
9589
9590
9591 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
9592 switch(type) {
9593 case T_BYTE:
9594 case T_BOOLEAN:
9595 evmovdqub(dst, kmask, src, merge, vector_len);
9596 break;
9597 case T_CHAR:
9598 case T_SHORT:
9599 evmovdquw(dst, kmask, src, merge, vector_len);
9600 break;
9601 case T_INT:
9602 case T_FLOAT:
9603 evmovdqul(dst, kmask, src, merge, vector_len);
9604 break;
9605 case T_LONG:
9606 case T_DOUBLE:
9607 evmovdquq(dst, kmask, src, merge, vector_len);
9608 break;
9609 default:
9610 fatal("Unexpected type argument %s", type2name(type));
9611 break;
9612 }
9613 }
9614
9615 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
9616 switch(type) {
9617 case T_BYTE:
9618 case T_BOOLEAN:
9619 evmovdqub(dst, kmask, src, merge, vector_len);
9620 break;
9621 case T_CHAR:
9622 case T_SHORT:
9623 evmovdquw(dst, kmask, src, merge, vector_len);
9624 break;
9625 case T_INT:
9626 case T_FLOAT:
9627 evmovdqul(dst, kmask, src, merge, vector_len);
9628 break;
9629 case T_LONG:
9630 case T_DOUBLE:
9631 evmovdquq(dst, kmask, src, merge, vector_len);
9632 break;
9633 default:
9634 fatal("Unexpected type argument %s", type2name(type));
9635 break;
9636 }
9637 }
9638
9639 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
9640 switch(masklen) {
9641 case 2:
9642 knotbl(dst, src);
9643 movl(rtmp, 3);
9644 kmovbl(ktmp, rtmp);
9645 kandbl(dst, ktmp, dst);
9646 break;
9647 case 4:
9648 knotbl(dst, src);
9649 movl(rtmp, 15);
9650 kmovbl(ktmp, rtmp);
9651 kandbl(dst, ktmp, dst);
9652 break;
9653 case 8:
9654 knotbl(dst, src);
9655 break;
9656 case 16:
9657 knotwl(dst, src);
9658 break;
9659 case 32:
9660 knotdl(dst, src);
9661 break;
9662 case 64:
9663 knotql(dst, src);
9664 break;
9665 default:
9666 fatal("Unexpected vector length %d", masklen);
9667 break;
9668 }
9669 }
9670
9671 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9672 switch(type) {
9673 case T_BOOLEAN:
9674 case T_BYTE:
9675 kandbl(dst, src1, src2);
9676 break;
9677 case T_CHAR:
9678 case T_SHORT:
9679 kandwl(dst, src1, src2);
9680 break;
9681 case T_INT:
9682 case T_FLOAT:
9683 kanddl(dst, src1, src2);
9684 break;
9685 case T_LONG:
9686 case T_DOUBLE:
9687 kandql(dst, src1, src2);
9688 break;
9689 default:
9690 fatal("Unexpected type argument %s", type2name(type));
9691 break;
9692 }
9693 }
9694
9695 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9696 switch(type) {
9697 case T_BOOLEAN:
9698 case T_BYTE:
9699 korbl(dst, src1, src2);
9700 break;
9701 case T_CHAR:
9702 case T_SHORT:
9703 korwl(dst, src1, src2);
9704 break;
9705 case T_INT:
9706 case T_FLOAT:
9707 kordl(dst, src1, src2);
9708 break;
9709 case T_LONG:
9710 case T_DOUBLE:
9711 korql(dst, src1, src2);
9712 break;
9713 default:
9714 fatal("Unexpected type argument %s", type2name(type));
9715 break;
9716 }
9717 }
9718
9719 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9720 switch(type) {
9721 case T_BOOLEAN:
9722 case T_BYTE:
9723 kxorbl(dst, src1, src2);
9724 break;
9725 case T_CHAR:
9726 case T_SHORT:
9727 kxorwl(dst, src1, src2);
9728 break;
9729 case T_INT:
9730 case T_FLOAT:
9731 kxordl(dst, src1, src2);
9732 break;
9733 case T_LONG:
9734 case T_DOUBLE:
9735 kxorql(dst, src1, src2);
9736 break;
9737 default:
9738 fatal("Unexpected type argument %s", type2name(type));
9739 break;
9740 }
9741 }
9742
9743 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9744 switch(type) {
9745 case T_BOOLEAN:
9746 case T_BYTE:
9747 evpermb(dst, mask, nds, src, merge, vector_len); break;
9748 case T_CHAR:
9749 case T_SHORT:
9750 evpermw(dst, mask, nds, src, merge, vector_len); break;
9751 case T_INT:
9752 case T_FLOAT:
9753 evpermd(dst, mask, nds, src, merge, vector_len); break;
9754 case T_LONG:
9755 case T_DOUBLE:
9756 evpermq(dst, mask, nds, src, merge, vector_len); break;
9757 default:
9758 fatal("Unexpected type argument %s", type2name(type)); break;
9759 }
9760 }
9761
9762 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9763 switch(type) {
9764 case T_BOOLEAN:
9765 case T_BYTE:
9766 evpermb(dst, mask, nds, src, merge, vector_len); break;
9767 case T_CHAR:
9768 case T_SHORT:
9769 evpermw(dst, mask, nds, src, merge, vector_len); break;
9770 case T_INT:
9771 case T_FLOAT:
9772 evpermd(dst, mask, nds, src, merge, vector_len); break;
9773 case T_LONG:
9774 case T_DOUBLE:
9775 evpermq(dst, mask, nds, src, merge, vector_len); break;
9776 default:
9777 fatal("Unexpected type argument %s", type2name(type)); break;
9778 }
9779 }
9780
9781 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9782 switch(type) {
9783 case T_BYTE:
9784 evpminub(dst, mask, nds, src, merge, vector_len); break;
9785 case T_SHORT:
9786 evpminuw(dst, mask, nds, src, merge, vector_len); break;
9787 case T_INT:
9788 evpminud(dst, mask, nds, src, merge, vector_len); break;
9789 case T_LONG:
9790 evpminuq(dst, mask, nds, src, merge, vector_len); break;
9791 default:
9792 fatal("Unexpected type argument %s", type2name(type)); break;
9793 }
9794 }
9795
9796 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9797 switch(type) {
9798 case T_BYTE:
9799 evpmaxub(dst, mask, nds, src, merge, vector_len); break;
9800 case T_SHORT:
9801 evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
9802 case T_INT:
9803 evpmaxud(dst, mask, nds, src, merge, vector_len); break;
9804 case T_LONG:
9805 evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
9806 default:
9807 fatal("Unexpected type argument %s", type2name(type)); break;
9808 }
9809 }
9810
9811 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9812 switch(type) {
9813 case T_BYTE:
9814 evpminub(dst, mask, nds, src, merge, vector_len); break;
9815 case T_SHORT:
9816 evpminuw(dst, mask, nds, src, merge, vector_len); break;
9817 case T_INT:
9818 evpminud(dst, mask, nds, src, merge, vector_len); break;
9819 case T_LONG:
9820 evpminuq(dst, mask, nds, src, merge, vector_len); break;
9821 default:
9822 fatal("Unexpected type argument %s", type2name(type)); break;
9823 }
9824 }
9825
9826 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9827 switch(type) {
9828 case T_BYTE:
9829 evpmaxub(dst, mask, nds, src, merge, vector_len); break;
9830 case T_SHORT:
9831 evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
9832 case T_INT:
9833 evpmaxud(dst, mask, nds, src, merge, vector_len); break;
9834 case T_LONG:
9835 evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
9836 default:
9837 fatal("Unexpected type argument %s", type2name(type)); break;
9838 }
9839 }
9840
9841 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9842 switch(type) {
9843 case T_BYTE:
9844 evpminsb(dst, mask, nds, src, merge, vector_len); break;
9845 case T_SHORT:
9846 evpminsw(dst, mask, nds, src, merge, vector_len); break;
9847 case T_INT:
9848 evpminsd(dst, mask, nds, src, merge, vector_len); break;
9849 case T_LONG:
9850 evpminsq(dst, mask, nds, src, merge, vector_len); break;
9851 case T_FLOAT:
9852 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9853 case T_DOUBLE:
9854 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9855 default:
9856 fatal("Unexpected type argument %s", type2name(type)); break;
9857 }
9858 }
9859
9860 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9861 switch(type) {
9862 case T_BYTE:
9863 evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
9864 case T_SHORT:
9865 evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
9866 case T_INT:
9867 evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
9868 case T_LONG:
9869 evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
9870 case T_FLOAT:
9871 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9872 case T_DOUBLE:
9873 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9874 default:
9875 fatal("Unexpected type argument %s", type2name(type)); break;
9876 }
9877 }
9878
9879 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9880 switch(type) {
9881 case T_BYTE:
9882 evpminsb(dst, mask, nds, src, merge, vector_len); break;
9883 case T_SHORT:
9884 evpminsw(dst, mask, nds, src, merge, vector_len); break;
9885 case T_INT:
9886 evpminsd(dst, mask, nds, src, merge, vector_len); break;
9887 case T_LONG:
9888 evpminsq(dst, mask, nds, src, merge, vector_len); break;
9889 case T_FLOAT:
9890 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9891 case T_DOUBLE:
9892 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9893 default:
9894 fatal("Unexpected type argument %s", type2name(type)); break;
9895 }
9896 }
9897
9898 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9899 switch(type) {
9900 case T_BYTE:
9901 evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
9902 case T_SHORT:
9903 evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
9904 case T_INT:
9905 evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
9906 case T_LONG:
9907 evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
9908 case T_FLOAT:
9909 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9910 case T_DOUBLE:
9911 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9912 default:
9913 fatal("Unexpected type argument %s", type2name(type)); break;
9914 }
9915 }
9916
9917 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9918 switch(type) {
9919 case T_INT:
9920 evpxord(dst, mask, nds, src, merge, vector_len); break;
9921 case T_LONG:
9922 evpxorq(dst, mask, nds, src, merge, vector_len); break;
9923 default:
9924 fatal("Unexpected type argument %s", type2name(type)); break;
9925 }
9926 }
9927
9928 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9929 switch(type) {
9930 case T_INT:
9931 evpxord(dst, mask, nds, src, merge, vector_len); break;
9932 case T_LONG:
9933 evpxorq(dst, mask, nds, src, merge, vector_len); break;
9934 default:
9935 fatal("Unexpected type argument %s", type2name(type)); break;
9936 }
9937 }
9938
9939 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9940 switch(type) {
9941 case T_INT:
9942 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
9943 case T_LONG:
9944 evporq(dst, mask, nds, src, merge, vector_len); break;
9945 default:
9946 fatal("Unexpected type argument %s", type2name(type)); break;
9947 }
9948 }
9949
9950 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9951 switch(type) {
9952 case T_INT:
9953 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
9954 case T_LONG:
9955 evporq(dst, mask, nds, src, merge, vector_len); break;
9956 default:
9957 fatal("Unexpected type argument %s", type2name(type)); break;
9958 }
9959 }
9960
9961 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9962 switch(type) {
9963 case T_INT:
9964 evpandd(dst, mask, nds, src, merge, vector_len); break;
9965 case T_LONG:
9966 evpandq(dst, mask, nds, src, merge, vector_len); break;
9967 default:
9968 fatal("Unexpected type argument %s", type2name(type)); break;
9969 }
9970 }
9971
9972 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9973 switch(type) {
9974 case T_INT:
9975 evpandd(dst, mask, nds, src, merge, vector_len); break;
9976 case T_LONG:
9977 evpandq(dst, mask, nds, src, merge, vector_len); break;
9978 default:
9979 fatal("Unexpected type argument %s", type2name(type)); break;
9980 }
9981 }
9982
9983 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
9984 switch(masklen) {
9985 case 8:
9986 kortestbl(src1, src2);
9987 break;
9988 case 16:
9989 kortestwl(src1, src2);
9990 break;
9991 case 32:
9992 kortestdl(src1, src2);
9993 break;
9994 case 64:
9995 kortestql(src1, src2);
9996 break;
9997 default:
9998 fatal("Unexpected mask length %d", masklen);
9999 break;
10000 }
10001 }
10002
10003
10004 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
10005 switch(masklen) {
10006 case 8:
10007 ktestbl(src1, src2);
10008 break;
10009 case 16:
10010 ktestwl(src1, src2);
10011 break;
10012 case 32:
10013 ktestdl(src1, src2);
10014 break;
10015 case 64:
10016 ktestql(src1, src2);
10017 break;
10018 default:
10019 fatal("Unexpected mask length %d", masklen);
10020 break;
10021 }
10022 }
10023
10024 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
10025 switch(type) {
10026 case T_INT:
10027 evprold(dst, mask, src, shift, merge, vlen_enc); break;
10028 case T_LONG:
10029 evprolq(dst, mask, src, shift, merge, vlen_enc); break;
10030 default:
10031 fatal("Unexpected type argument %s", type2name(type)); break;
10032 break;
10033 }
10034 }
10035
10036 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
10037 switch(type) {
10038 case T_INT:
10039 evprord(dst, mask, src, shift, merge, vlen_enc); break;
10040 case T_LONG:
10041 evprorq(dst, mask, src, shift, merge, vlen_enc); break;
10042 default:
10043 fatal("Unexpected type argument %s", type2name(type)); break;
10044 }
10045 }
10046
10047 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
10048 switch(type) {
10049 case T_INT:
10050 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
10051 case T_LONG:
10052 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
10053 default:
10054 fatal("Unexpected type argument %s", type2name(type)); break;
10055 }
10056 }
10057
10058 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
10059 switch(type) {
10060 case T_INT:
10061 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
10062 case T_LONG:
10063 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
10064 default:
10065 fatal("Unexpected type argument %s", type2name(type)); break;
10066 }
10067 }
10068
10069 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10070 assert(rscratch != noreg || always_reachable(src), "missing");
10071
10072 if (reachable(src)) {
10073 evpandq(dst, nds, as_Address(src), vector_len);
10074 } else {
10075 lea(rscratch, src);
10076 evpandq(dst, nds, Address(rscratch, 0), vector_len);
10077 }
10078 }
10079
10080 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
10081 assert(rscratch != noreg || always_reachable(src), "missing");
10082
10083 if (reachable(src)) {
10084 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
10085 } else {
10086 lea(rscratch, src);
10087 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
10088 }
10089 }
10090
10091 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10092 assert(rscratch != noreg || always_reachable(src), "missing");
10093
10094 if (reachable(src)) {
10095 evporq(dst, nds, as_Address(src), vector_len);
10096 } else {
10097 lea(rscratch, src);
10098 evporq(dst, nds, Address(rscratch, 0), vector_len);
10099 }
10100 }
10101
10102 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10103 assert(rscratch != noreg || always_reachable(src), "missing");
10104
10105 if (reachable(src)) {
10106 vpshufb(dst, nds, as_Address(src), vector_len);
10107 } else {
10108 lea(rscratch, src);
10109 vpshufb(dst, nds, Address(rscratch, 0), vector_len);
10110 }
10111 }
10112
10113 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10114 assert(rscratch != noreg || always_reachable(src), "missing");
10115
10116 if (reachable(src)) {
10117 Assembler::vpor(dst, nds, as_Address(src), vector_len);
10118 } else {
10119 lea(rscratch, src);
10120 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
10121 }
10122 }
10123
10124 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
10125 assert(rscratch != noreg || always_reachable(src3), "missing");
10126
10127 if (reachable(src3)) {
10128 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
10129 } else {
10130 lea(rscratch, src3);
10131 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
10132 }
10133 }
10134
10135 #if COMPILER2_OR_JVMCI
10136
10137 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
10138 Register length, Register temp, int vec_enc) {
10139 // Computing mask for predicated vector store.
10140 movptr(temp, -1);
10141 bzhiq(temp, temp, length);
10142 kmov(mask, temp);
10143 evmovdqu(bt, mask, dst, xmm, true, vec_enc);
10144 }
10145
10146 // Set memory operation for length "less than" 64 bytes.
10147 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
10148 XMMRegister xmm, KRegister mask, Register length,
10149 Register temp, bool use64byteVector) {
10150 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10151 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
10152 if (!use64byteVector) {
10153 fill32(dst, disp, xmm);
10154 subptr(length, 32 >> shift);
10155 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
10156 } else {
10157 assert(MaxVectorSize == 64, "vector length != 64");
10158 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
10159 }
10160 }
10161
10162
10163 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
10164 XMMRegister xmm, KRegister mask, Register length,
10165 Register temp) {
10166 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10167 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
10168 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
10169 }
10170
10171
10172 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
10173 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10174 vmovdqu(dst, xmm);
10175 }
10176
10177 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
10178 fill32(Address(dst, disp), xmm);
10179 }
10180
10181 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
10182 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10183 if (!use64byteVector) {
10184 fill32(dst, xmm);
10185 fill32(dst.plus_disp(32), xmm);
10186 } else {
10187 evmovdquq(dst, xmm, Assembler::AVX_512bit);
10188 }
10189 }
10190
10191 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
10192 fill64(Address(dst, disp), xmm, use64byteVector);
10193 }
10194
10195 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
10196 Register count, Register rtmp, XMMRegister xtmp) {
10197 Label L_exit;
10198 Label L_fill_start;
10199 Label L_fill_64_bytes;
10200 Label L_fill_96_bytes;
10201 Label L_fill_128_bytes;
10202 Label L_fill_128_bytes_loop;
10203 Label L_fill_128_loop_header;
10204 Label L_fill_128_bytes_loop_header;
10205 Label L_fill_128_bytes_loop_pre_header;
10206 Label L_fill_zmm_sequence;
10207
10208 int shift = -1;
10209 switch(type) {
10210 case T_BYTE: shift = 0;
10211 break;
10212 case T_SHORT: shift = 1;
10213 break;
10214 case T_INT: shift = 2;
10215 break;
10216 /* Uncomment when LONG fill stubs are supported.
10217 case T_LONG: shift = 3;
10218 break;
10219 */
10220 default:
10221 fatal("Unhandled type: %s\n", type2name(type));
10222 }
10223
10224 if ((CopyAVX3Threshold != 0) || (MaxVectorSize == 32)) {
10225
10226 if (MaxVectorSize == 64) {
10227 cmpq(count, CopyAVX3Threshold >> shift);
10228 jcc(Assembler::greater, L_fill_zmm_sequence);
10229 }
10230
10231 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
10232
10233 bind(L_fill_start);
10234
10235 cmpq(count, 32 >> shift);
10236 jccb(Assembler::greater, L_fill_64_bytes);
10237 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
10238 jmp(L_exit);
10239
10240 bind(L_fill_64_bytes);
10241 cmpq(count, 64 >> shift);
10242 jccb(Assembler::greater, L_fill_96_bytes);
10243 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
10244 jmp(L_exit);
10245
10246 bind(L_fill_96_bytes);
10247 cmpq(count, 96 >> shift);
10248 jccb(Assembler::greater, L_fill_128_bytes);
10249 fill64(to, 0, xtmp);
10250 subq(count, 64 >> shift);
10251 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
10252 jmp(L_exit);
10253
10254 bind(L_fill_128_bytes);
10255 cmpq(count, 128 >> shift);
10256 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
10257 fill64(to, 0, xtmp);
10258 fill32(to, 64, xtmp);
10259 subq(count, 96 >> shift);
10260 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
10261 jmp(L_exit);
10262
10263 bind(L_fill_128_bytes_loop_pre_header);
10264 {
10265 mov(rtmp, to);
10266 andq(rtmp, 31);
10267 jccb(Assembler::zero, L_fill_128_bytes_loop_header);
10268 negq(rtmp);
10269 addq(rtmp, 32);
10270 mov64(r8, -1L);
10271 bzhiq(r8, r8, rtmp);
10272 kmovql(k2, r8);
10273 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
10274 addq(to, rtmp);
10275 shrq(rtmp, shift);
10276 subq(count, rtmp);
10277 }
10278
10279 cmpq(count, 128 >> shift);
10280 jcc(Assembler::less, L_fill_start);
10281
10282 bind(L_fill_128_bytes_loop_header);
10283 subq(count, 128 >> shift);
10284
10285 align32();
10286 bind(L_fill_128_bytes_loop);
10287 fill64(to, 0, xtmp);
10288 fill64(to, 64, xtmp);
10289 addq(to, 128);
10290 subq(count, 128 >> shift);
10291 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
10292
10293 addq(count, 128 >> shift);
10294 jcc(Assembler::zero, L_exit);
10295 jmp(L_fill_start);
10296 }
10297
10298 if (MaxVectorSize == 64) {
10299 // Sequence using 64 byte ZMM register.
10300 Label L_fill_128_bytes_zmm;
10301 Label L_fill_192_bytes_zmm;
10302 Label L_fill_192_bytes_loop_zmm;
10303 Label L_fill_192_bytes_loop_header_zmm;
10304 Label L_fill_192_bytes_loop_pre_header_zmm;
10305 Label L_fill_start_zmm_sequence;
10306
10307 bind(L_fill_zmm_sequence);
10308 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
10309
10310 bind(L_fill_start_zmm_sequence);
10311 cmpq(count, 64 >> shift);
10312 jccb(Assembler::greater, L_fill_128_bytes_zmm);
10313 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
10314 jmp(L_exit);
10315
10316 bind(L_fill_128_bytes_zmm);
10317 cmpq(count, 128 >> shift);
10318 jccb(Assembler::greater, L_fill_192_bytes_zmm);
10319 fill64(to, 0, xtmp, true);
10320 subq(count, 64 >> shift);
10321 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
10322 jmp(L_exit);
10323
10324 bind(L_fill_192_bytes_zmm);
10325 cmpq(count, 192 >> shift);
10326 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
10327 fill64(to, 0, xtmp, true);
10328 fill64(to, 64, xtmp, true);
10329 subq(count, 128 >> shift);
10330 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
10331 jmp(L_exit);
10332
10333 bind(L_fill_192_bytes_loop_pre_header_zmm);
10334 {
10335 movq(rtmp, to);
10336 andq(rtmp, 63);
10337 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
10338 negq(rtmp);
10339 addq(rtmp, 64);
10340 mov64(r8, -1L);
10341 bzhiq(r8, r8, rtmp);
10342 kmovql(k2, r8);
10343 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
10344 addq(to, rtmp);
10345 shrq(rtmp, shift);
10346 subq(count, rtmp);
10347 }
10348
10349 cmpq(count, 192 >> shift);
10350 jcc(Assembler::less, L_fill_start_zmm_sequence);
10351
10352 bind(L_fill_192_bytes_loop_header_zmm);
10353 subq(count, 192 >> shift);
10354
10355 align32();
10356 bind(L_fill_192_bytes_loop_zmm);
10357 fill64(to, 0, xtmp, true);
10358 fill64(to, 64, xtmp, true);
10359 fill64(to, 128, xtmp, true);
10360 addq(to, 192);
10361 subq(count, 192 >> shift);
10362 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
10363
10364 addq(count, 192 >> shift);
10365 jcc(Assembler::zero, L_exit);
10366 jmp(L_fill_start_zmm_sequence);
10367 }
10368 bind(L_exit);
10369 }
10370 #endif //COMPILER2_OR_JVMCI
10371
10372
10373 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
10374 Label done;
10375 cvttss2sil(dst, src);
10376 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10377 cmpl(dst, 0x80000000); // float_sign_flip
10378 jccb(Assembler::notEqual, done);
10379 subptr(rsp, 8);
10380 movflt(Address(rsp, 0), src);
10381 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
10382 pop(dst);
10383 bind(done);
10384 }
10385
10386 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
10387 Label done;
10388 cvttsd2sil(dst, src);
10389 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10390 cmpl(dst, 0x80000000); // float_sign_flip
10391 jccb(Assembler::notEqual, done);
10392 subptr(rsp, 8);
10393 movdbl(Address(rsp, 0), src);
10394 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
10395 pop(dst);
10396 bind(done);
10397 }
10398
10399 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
10400 Label done;
10401 cvttss2siq(dst, src);
10402 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10403 jccb(Assembler::notEqual, done);
10404 subptr(rsp, 8);
10405 movflt(Address(rsp, 0), src);
10406 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
10407 pop(dst);
10408 bind(done);
10409 }
10410
10411 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10412 // Following code is line by line assembly translation rounding algorithm.
10413 // Please refer to java.lang.Math.round(float) algorithm for details.
10414 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
10415 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
10416 const int32_t FloatConsts_EXP_BIAS = 127;
10417 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
10418 const int32_t MINUS_32 = 0xFFFFFFE0;
10419 Label L_special_case, L_block1, L_exit;
10420 movl(rtmp, FloatConsts_EXP_BIT_MASK);
10421 movdl(dst, src);
10422 andl(dst, rtmp);
10423 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
10424 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
10425 subl(rtmp, dst);
10426 movl(rcx, rtmp);
10427 movl(dst, MINUS_32);
10428 testl(rtmp, dst);
10429 jccb(Assembler::notEqual, L_special_case);
10430 movdl(dst, src);
10431 andl(dst, FloatConsts_SIGNIF_BIT_MASK);
10432 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
10433 movdl(rtmp, src);
10434 testl(rtmp, rtmp);
10435 jccb(Assembler::greaterEqual, L_block1);
10436 negl(dst);
10437 bind(L_block1);
10438 sarl(dst);
10439 addl(dst, 0x1);
10440 sarl(dst, 0x1);
10441 jmp(L_exit);
10442 bind(L_special_case);
10443 convert_f2i(dst, src);
10444 bind(L_exit);
10445 }
10446
10447 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10448 // Following code is line by line assembly translation rounding algorithm.
10449 // Please refer to java.lang.Math.round(double) algorithm for details.
10450 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
10451 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
10452 const int64_t DoubleConsts_EXP_BIAS = 1023;
10453 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
10454 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
10455 Label L_special_case, L_block1, L_exit;
10456 mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
10457 movq(dst, src);
10458 andq(dst, rtmp);
10459 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
10460 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
10461 subq(rtmp, dst);
10462 movq(rcx, rtmp);
10463 mov64(dst, MINUS_64);
10464 testq(rtmp, dst);
10465 jccb(Assembler::notEqual, L_special_case);
10466 movq(dst, src);
10467 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
10468 andq(dst, rtmp);
10469 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
10470 orq(dst, rtmp);
10471 movq(rtmp, src);
10472 testq(rtmp, rtmp);
10473 jccb(Assembler::greaterEqual, L_block1);
10474 negq(dst);
10475 bind(L_block1);
10476 sarq(dst);
10477 addq(dst, 0x1);
10478 sarq(dst, 0x1);
10479 jmp(L_exit);
10480 bind(L_special_case);
10481 convert_d2l(dst, src);
10482 bind(L_exit);
10483 }
10484
10485 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
10486 Label done;
10487 cvttsd2siq(dst, src);
10488 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10489 jccb(Assembler::notEqual, done);
10490 subptr(rsp, 8);
10491 movdbl(Address(rsp, 0), src);
10492 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
10493 pop(dst);
10494 bind(done);
10495 }
10496
10497 void MacroAssembler::cache_wb(Address line)
10498 {
10499 // 64 bit cpus always support clflush
10500 assert(VM_Version::supports_clflush(), "clflush should be available");
10501 bool optimized = VM_Version::supports_clflushopt();
10502 bool no_evict = VM_Version::supports_clwb();
10503
10504 // prefer clwb (writeback without evict) otherwise
10505 // prefer clflushopt (potentially parallel writeback with evict)
10506 // otherwise fallback on clflush (serial writeback with evict)
10507
10508 if (optimized) {
10509 if (no_evict) {
10510 clwb(line);
10511 } else {
10512 clflushopt(line);
10513 }
10514 } else {
10515 // no need for fence when using CLFLUSH
10516 clflush(line);
10517 }
10518 }
10519
10520 void MacroAssembler::cache_wbsync(bool is_pre)
10521 {
10522 assert(VM_Version::supports_clflush(), "clflush should be available");
10523 bool optimized = VM_Version::supports_clflushopt();
10524 bool no_evict = VM_Version::supports_clwb();
10525
10526 // pick the correct implementation
10527
10528 if (!is_pre && (optimized || no_evict)) {
10529 // need an sfence for post flush when using clflushopt or clwb
10530 // otherwise no no need for any synchroniaztion
10531
10532 sfence();
10533 }
10534 }
10535
10536 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
10537 switch (cond) {
10538 // Note some conditions are synonyms for others
10539 case Assembler::zero: return Assembler::notZero;
10540 case Assembler::notZero: return Assembler::zero;
10541 case Assembler::less: return Assembler::greaterEqual;
10542 case Assembler::lessEqual: return Assembler::greater;
10543 case Assembler::greater: return Assembler::lessEqual;
10544 case Assembler::greaterEqual: return Assembler::less;
10545 case Assembler::below: return Assembler::aboveEqual;
10546 case Assembler::belowEqual: return Assembler::above;
10547 case Assembler::above: return Assembler::belowEqual;
10548 case Assembler::aboveEqual: return Assembler::below;
10549 case Assembler::overflow: return Assembler::noOverflow;
10550 case Assembler::noOverflow: return Assembler::overflow;
10551 case Assembler::negative: return Assembler::positive;
10552 case Assembler::positive: return Assembler::negative;
10553 case Assembler::parity: return Assembler::noParity;
10554 case Assembler::noParity: return Assembler::parity;
10555 }
10556 ShouldNotReachHere(); return Assembler::overflow;
10557 }
10558
10559 // This is simply a call to Thread::current()
10560 void MacroAssembler::get_thread_slow(Register thread) {
10561 if (thread != rax) {
10562 push(rax);
10563 }
10564 push(rdi);
10565 push(rsi);
10566 push(rdx);
10567 push(rcx);
10568 push(r8);
10569 push(r9);
10570 push(r10);
10571 push(r11);
10572
10573 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
10574
10575 pop(r11);
10576 pop(r10);
10577 pop(r9);
10578 pop(r8);
10579 pop(rcx);
10580 pop(rdx);
10581 pop(rsi);
10582 pop(rdi);
10583 if (thread != rax) {
10584 mov(thread, rax);
10585 pop(rax);
10586 }
10587 }
10588
10589 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
10590 Label L_stack_ok;
10591 if (bias == 0) {
10592 testptr(sp, 2 * wordSize - 1);
10593 } else {
10594 // lea(tmp, Address(rsp, bias);
10595 mov(tmp, sp);
10596 addptr(tmp, bias);
10597 testptr(tmp, 2 * wordSize - 1);
10598 }
10599 jcc(Assembler::equal, L_stack_ok);
10600 block_comment(msg);
10601 stop(msg);
10602 bind(L_stack_ok);
10603 }
10604
10605 // Implements fast-locking.
10606 //
10607 // obj: the object to be locked
10608 // reg_rax: rax
10609 // thread: the thread which attempts to lock obj
10610 // tmp: a temporary register
10611 void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
10612 Register thread = r15_thread;
10613
10614 assert(reg_rax == rax, "");
10615 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
10616
10617 Label push;
10618 const Register top = tmp;
10619
10620 // Preload the markWord. It is important that this is the first
10621 // instruction emitted as it is part of C1's null check semantics.
10622 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10623
10624 if (UseObjectMonitorTable) {
10625 // Clear cache in case fast locking succeeds or we need to take the slow-path.
10626 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
10627 }
10628
10629 if (DiagnoseSyncOnValueBasedClasses != 0) {
10630 load_klass(tmp, obj, rscratch1);
10631 testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
10632 jcc(Assembler::notZero, slow);
10633 }
10634
10635 // Load top.
10636 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10637
10638 // Check if the lock-stack is full.
10639 cmpl(top, LockStack::end_offset());
10640 jcc(Assembler::greaterEqual, slow);
10641
10642 // Check for recursion.
10643 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10644 jcc(Assembler::equal, push);
10645
10646 // Check header for monitor (0b10).
10647 testptr(reg_rax, markWord::monitor_value);
10648 jcc(Assembler::notZero, slow);
10649
10650 // Try to lock. Transition lock bits 0b01 => 0b00
10651 movptr(tmp, reg_rax);
10652 andptr(tmp, ~(int32_t)markWord::unlocked_value);
10653 orptr(reg_rax, markWord::unlocked_value);
10654 // Mask inline_type bit such that we go to the slow path if object is an inline type
10655 andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10656
10657 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10658 jcc(Assembler::notEqual, slow);
10659
10660 // Restore top, CAS clobbers register.
10661 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10662
10663 bind(push);
10664 // After successful lock, push object on lock-stack.
10665 movptr(Address(thread, top), obj);
10666 incrementl(top, oopSize);
10667 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10668 }
10669
10670 // Implements fast-unlocking.
10671 //
10672 // obj: the object to be unlocked
10673 // reg_rax: rax
10674 // thread: the thread
10675 // tmp: a temporary register
10676 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
10677 Register thread = r15_thread;
10678
10679 assert(reg_rax == rax, "");
10680 assert_different_registers(obj, reg_rax, thread, tmp);
10681
10682 Label unlocked, push_and_slow;
10683 const Register top = tmp;
10684
10685 // Check if obj is top of lock-stack.
10686 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10687 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10688 jcc(Assembler::notEqual, slow);
10689
10690 // Pop lock-stack.
10691 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10692 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10693
10694 // Check if recursive.
10695 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10696 jcc(Assembler::equal, unlocked);
10697
10698 // Not recursive. Check header for monitor (0b10).
10699 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10700 testptr(reg_rax, markWord::monitor_value);
10701 jcc(Assembler::notZero, push_and_slow);
10702
10703 #ifdef ASSERT
10704 // Check header not unlocked (0b01).
10705 Label not_unlocked;
10706 testptr(reg_rax, markWord::unlocked_value);
10707 jcc(Assembler::zero, not_unlocked);
10708 stop("fast_unlock already unlocked");
10709 bind(not_unlocked);
10710 #endif
10711
10712 // Try to unlock. Transition lock bits 0b00 => 0b01
10713 movptr(tmp, reg_rax);
10714 orptr(tmp, markWord::unlocked_value);
10715 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10716 jcc(Assembler::equal, unlocked);
10717
10718 bind(push_and_slow);
10719 // Restore lock-stack and handle the unlock in runtime.
10720 #ifdef ASSERT
10721 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10722 movptr(Address(thread, top), obj);
10723 #endif
10724 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10725 jmp(slow);
10726
10727 bind(unlocked);
10728 }
10729
10730 // Saves legacy GPRs state on stack.
10731 void MacroAssembler::save_legacy_gprs() {
10732 subq(rsp, 16 * wordSize);
10733 movq(Address(rsp, 15 * wordSize), rax);
10734 movq(Address(rsp, 14 * wordSize), rcx);
10735 movq(Address(rsp, 13 * wordSize), rdx);
10736 movq(Address(rsp, 12 * wordSize), rbx);
10737 movq(Address(rsp, 10 * wordSize), rbp);
10738 movq(Address(rsp, 9 * wordSize), rsi);
10739 movq(Address(rsp, 8 * wordSize), rdi);
10740 movq(Address(rsp, 7 * wordSize), r8);
10741 movq(Address(rsp, 6 * wordSize), r9);
10742 movq(Address(rsp, 5 * wordSize), r10);
10743 movq(Address(rsp, 4 * wordSize), r11);
10744 movq(Address(rsp, 3 * wordSize), r12);
10745 movq(Address(rsp, 2 * wordSize), r13);
10746 movq(Address(rsp, wordSize), r14);
10747 movq(Address(rsp, 0), r15);
10748 }
10749
10750 // Resotres back legacy GPRs state from stack.
10751 void MacroAssembler::restore_legacy_gprs() {
10752 movq(r15, Address(rsp, 0));
10753 movq(r14, Address(rsp, wordSize));
10754 movq(r13, Address(rsp, 2 * wordSize));
10755 movq(r12, Address(rsp, 3 * wordSize));
10756 movq(r11, Address(rsp, 4 * wordSize));
10757 movq(r10, Address(rsp, 5 * wordSize));
10758 movq(r9, Address(rsp, 6 * wordSize));
10759 movq(r8, Address(rsp, 7 * wordSize));
10760 movq(rdi, Address(rsp, 8 * wordSize));
10761 movq(rsi, Address(rsp, 9 * wordSize));
10762 movq(rbp, Address(rsp, 10 * wordSize));
10763 movq(rbx, Address(rsp, 12 * wordSize));
10764 movq(rdx, Address(rsp, 13 * wordSize));
10765 movq(rcx, Address(rsp, 14 * wordSize));
10766 movq(rax, Address(rsp, 15 * wordSize));
10767 addq(rsp, 16 * wordSize);
10768 }
10769
10770 void MacroAssembler::load_aotrc_address(Register reg, address a) {
10771 #if INCLUDE_CDS
10772 assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
10773 if (AOTCodeCache::is_on_for_dump()) {
10774 // all aotrc field addresses should be registered in the AOTCodeCache address table
10775 lea(reg, ExternalAddress(a));
10776 } else {
10777 mov64(reg, (uint64_t)a);
10778 }
10779 #else
10780 ShouldNotReachHere();
10781 #endif
10782 }
10783
10784 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
10785 if (VM_Version::supports_apx_f()) {
10786 esetzucc(comparison, dst);
10787 } else {
10788 setb(comparison, dst);
10789 movzbl(dst, dst);
10790 }
10791 }