1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/aotCodeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "compiler/compiler_globals.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "ci/ciInlineKlass.hpp"
32 #include "crc32c.h"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/collectedHeap.inline.hpp"
36 #include "gc/shared/tlab_globals.hpp"
37 #include "interpreter/bytecodeHistogram.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "interpreter/interpreterRuntime.hpp"
40 #include "jvm.h"
41 #include "memory/resourceArea.hpp"
42 #include "memory/universe.hpp"
43 #include "oops/accessDecorators.hpp"
44 #include "oops/compressedKlass.inline.hpp"
45 #include "oops/compressedOops.inline.hpp"
46 #include "oops/klass.inline.hpp"
47 #include "oops/resolvedFieldEntry.hpp"
48 #include "prims/methodHandles.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/continuation.hpp"
51 #include "runtime/interfaceSupport.inline.hpp"
52 #include "runtime/javaThread.hpp"
53 #include "runtime/jniHandles.hpp"
54 #include "runtime/objectMonitor.hpp"
55 #include "runtime/os.hpp"
56 #include "runtime/safepoint.hpp"
57 #include "runtime/safepointMechanism.hpp"
58 #include "runtime/sharedRuntime.hpp"
59 #include "runtime/signature_cc.hpp"
60 #include "runtime/stubRoutines.hpp"
61 #include "utilities/checkedCast.hpp"
62 #include "utilities/macros.hpp"
63 #include "vmreg_x86.inline.hpp"
64 #ifdef COMPILER2
65 #include "opto/output.hpp"
66 #endif
67
68 #ifdef PRODUCT
69 #define BLOCK_COMMENT(str) /* nothing */
70 #define STOP(error) stop(error)
71 #else
72 #define BLOCK_COMMENT(str) block_comment(str)
73 #define STOP(error) block_comment(error); stop(error)
74 #endif
75
76 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
77
78 #ifdef ASSERT
79 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
80 #endif
81
82 static const Assembler::Condition reverse[] = {
83 Assembler::noOverflow /* overflow = 0x0 */ ,
84 Assembler::overflow /* noOverflow = 0x1 */ ,
85 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
86 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
87 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
88 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
89 Assembler::above /* belowEqual = 0x6 */ ,
90 Assembler::belowEqual /* above = 0x7 */ ,
91 Assembler::positive /* negative = 0x8 */ ,
92 Assembler::negative /* positive = 0x9 */ ,
93 Assembler::noParity /* parity = 0xa */ ,
94 Assembler::parity /* noParity = 0xb */ ,
95 Assembler::greaterEqual /* less = 0xc */ ,
96 Assembler::less /* greaterEqual = 0xd */ ,
97 Assembler::greater /* lessEqual = 0xe */ ,
98 Assembler::lessEqual /* greater = 0xf, */
99
100 };
101
102
103 // Implementation of MacroAssembler
104
105 Address MacroAssembler::as_Address(AddressLiteral adr) {
106 // amd64 always does this as a pc-rel;
107 // we can be absolute or disp based on the instruction type:
108 // jmp/call use displacements, others are absolute
109 assert(!adr.is_lval(), "must be rval");
110 assert(reachable(adr), "must be");
111 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
112
113 }
114
115 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
116 AddressLiteral base = adr.base();
117 lea(rscratch, base);
118 Address index = adr.index();
119 assert(index._disp == 0, "must not have disp"); // maybe it can?
120 Address array(rscratch, index._index, index._scale, index._disp);
121 return array;
122 }
123
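// Call a leaf runtime entry point: no last_Java_frame or oop map is set up,
// so the callee must not block or walk the Java stack. The stack pointer is
// re-aligned to 16 bytes around the call, as the ABI requires.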
124 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
125 Label L, E;
126
127 #ifdef _WIN64
128 // Windows always allocates space for its register args
129 assert(num_args <= 4, "only register arguments supported");
130 subq(rsp, frame::arg_reg_save_area_bytes);
131 #endif
132
133 // Align stack if necessary
134 testl(rsp, 15);
135 jcc(Assembler::zero, L);
136
137 subq(rsp, 8);
138 call(RuntimeAddress(entry_point));
139 addq(rsp, 8);
140 jmp(E);
141
142 bind(L);
143 call(RuntimeAddress(entry_point));
144
145 bind(E);
146
147 #ifdef _WIN64
148 // restore stack pointer
149 addq(rsp, frame::arg_reg_save_area_bytes);
150 #endif
151 }
152
153 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
154 assert(!src2.is_lval(), "should use cmpptr");
155 assert(rscratch != noreg || always_reachable(src2), "missing");
156
157 if (reachable(src2)) {
158 cmpq(src1, as_Address(src2));
159 } else {
160 lea(rscratch, src2);
161 Assembler::cmpq(src1, Address(rscratch, 0));
162 }
163 }
164
165 int MacroAssembler::corrected_idivq(Register reg) {
166 // Full implementation of Java ldiv and lrem; checks for special
167 // case as described in JVM spec., p.243 & p.271. The function
168 // returns the (pc) offset of the idivq instruction - may be needed
169 // for implicit exceptions.
170 //
171 // normal case special case
172 //
173 // input : rax: dividend min_long
174 // reg: divisor (may not be rax/rdx) -1
175 //
176 // output: rax: quotient (= rax idiv reg) min_long
177 // rdx: remainder (= rax irem reg) 0
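// For example, min_long / -1 is not representable (the true quotient 2^63
// overflows a 64-bit signed value), so idivq would raise a divide error;
// the explicit check below instead produces min_long as the quotient and 0
// as the remainder, matching Java's ldiv/lrem semantics.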
178 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
179 static const int64_t min_long = 0x8000000000000000;
180 Label normal_case, special_case;
181
182 // check for special case
183 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
184 jcc(Assembler::notEqual, normal_case);
185 xorl(rdx, rdx); // prepare rdx for possible special case (where
186 // remainder = 0)
187 cmpq(reg, -1);
188 jcc(Assembler::equal, special_case);
189
190 // handle normal case
191 bind(normal_case);
192 cdqq();
193 int idivq_offset = offset();
194 idivq(reg);
195
196 // normal and special case exit
197 bind(special_case);
198
199 return idivq_offset;
200 }
201
202 void MacroAssembler::decrementq(Register reg, int value) {
203 if (value == min_jint) { subq(reg, value); return; }
204 if (value < 0) { incrementq(reg, -value); return; }
205 if (value == 0) { ; return; }
206 if (value == 1 && UseIncDec) { decq(reg) ; return; }
207 /* else */ { subq(reg, value) ; return; }
208 }
209
210 void MacroAssembler::decrementq(Address dst, int value) {
211 if (value == min_jint) { subq(dst, value); return; }
212 if (value < 0) { incrementq(dst, -value); return; }
213 if (value == 0) { ; return; }
214 if (value == 1 && UseIncDec) { decq(dst) ; return; }
215 /* else */ { subq(dst, value) ; return; }
216 }
217
218 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
219 assert(rscratch != noreg || always_reachable(dst), "missing");
220
221 if (reachable(dst)) {
222 incrementq(as_Address(dst));
223 } else {
224 lea(rscratch, dst);
225 incrementq(Address(rscratch, 0));
226 }
227 }
228
229 void MacroAssembler::incrementq(Register reg, int value) {
230 if (value == min_jint) { addq(reg, value); return; }
231 if (value < 0) { decrementq(reg, -value); return; }
232 if (value == 0) { ; return; }
233 if (value == 1 && UseIncDec) { incq(reg) ; return; }
234 /* else */ { addq(reg, value) ; return; }
235 }
236
237 void MacroAssembler::incrementq(Address dst, int value) {
238 if (value == min_jint) { addq(dst, value); return; }
239 if (value < 0) { decrementq(dst, -value); return; }
240 if (value == 0) { ; return; }
241 if (value == 1 && UseIncDec) { incq(dst) ; return; }
242 /* else */ { addq(dst, value) ; return; }
243 }
244
245 // 32bit can do a case table jump in one instruction but we no longer allow the base
246 // to be installed in the Address class
247 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
248 lea(rscratch, entry.base());
249 Address dispatch = entry.index();
250 assert(dispatch._base == noreg, "must be");
251 dispatch._base = rscratch;
252 jmp(dispatch);
253 }
254
255 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
256 ShouldNotReachHere(); // 64bit doesn't use two regs
257 cmpq(x_lo, y_lo);
258 }
259
260 void MacroAssembler::lea(Register dst, AddressLiteral src) {
261 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
262 }
263
264 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
265 lea(rscratch, adr);
266 movptr(dst, rscratch);
267 }
268
269 void MacroAssembler::leave() {
270 // %%% is this really better? Why not on 32bit too?
271 emit_int8((unsigned char)0xC9); // LEAVE
272 }
273
274 void MacroAssembler::lneg(Register hi, Register lo) {
275 ShouldNotReachHere(); // 64bit doesn't use two regs
276 negq(lo);
277 }
278
279 void MacroAssembler::movoop(Register dst, jobject obj) {
280 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
281 }
282
283 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
284 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
285 movq(dst, rscratch);
286 }
287
288 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
289 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
290 }
291
292 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
293 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
294 movq(dst, rscratch);
295 }
296
297 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
298 if (src.is_lval()) {
299 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
300 } else {
301 if (reachable(src)) {
302 movq(dst, as_Address(src));
303 } else {
304 lea(dst, src);
305 movq(dst, Address(dst, 0));
306 }
307 }
308 }
309
310 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
311 movq(as_Address(dst, rscratch), src);
312 }
313
314 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
315 movq(dst, as_Address(src, dst /*rscratch*/));
316 }
317
318 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
319 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
320 if (is_simm32(src)) {
321 movptr(dst, checked_cast<int32_t>(src));
322 } else {
323 mov64(rscratch, src);
324 movq(dst, rscratch);
325 }
326 }
327
328 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
329 movoop(rscratch, obj);
330 push(rscratch);
331 }
332
333 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
334 mov_metadata(rscratch, obj);
335 push(rscratch);
336 }
337
338 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
339 lea(rscratch, src);
340 if (src.is_lval()) {
341 push(rscratch);
342 } else {
343 pushq(Address(rscratch, 0));
344 }
345 }
346
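// Helpers that move a value into the corresponding C calling-convention
// argument register, skipping the move when it is already there.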
347 static void pass_arg0(MacroAssembler* masm, Register arg) {
348 if (c_rarg0 != arg ) {
349 masm->mov(c_rarg0, arg);
350 }
351 }
352
353 static void pass_arg1(MacroAssembler* masm, Register arg) {
354 if (c_rarg1 != arg ) {
355 masm->mov(c_rarg1, arg);
356 }
357 }
358
359 static void pass_arg2(MacroAssembler* masm, Register arg) {
360 if (c_rarg2 != arg ) {
361 masm->mov(c_rarg2, arg);
362 }
363 }
364
365 static void pass_arg3(MacroAssembler* masm, Register arg) {
366 if (c_rarg3 != arg ) {
367 masm->mov(c_rarg3, arg);
368 }
369 }
370
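// Emit code that prints msg, optionally shows a message box with the saved
// registers (ShowMessageBoxOnError), and then halts. Used by STOP() and the
// assert machinery; the generated code does not return.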
371 void MacroAssembler::stop(const char* msg) {
372 if (ShowMessageBoxOnError) {
373 address rip = pc();
374 pusha(); // get regs on stack
375 lea(c_rarg1, InternalAddress(rip));
376 movq(c_rarg2, rsp); // pass pointer to regs array
377 }
378 // Skip AOT caching C strings in scratch buffer.
379 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
380 lea(c_rarg0, ExternalAddress((address) str));
381 andq(rsp, -16); // align stack as required by ABI
382 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
383 hlt();
384 }
385
386 void MacroAssembler::warn(const char* msg) {
387 push(rbp);
388 movq(rbp, rsp);
389 andq(rsp, -16); // align stack as required by push_CPU_state and call
390 push_CPU_state(); // keeps alignment at 16 bytes
391
392 #ifdef _WIN64
393 // Windows always allocates space for its register args
394 subq(rsp, frame::arg_reg_save_area_bytes);
395 #endif
396 lea(c_rarg0, ExternalAddress((address) msg));
397 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
398
399 #ifdef _WIN64
400 // restore stack pointer
401 addq(rsp, frame::arg_reg_save_area_bytes);
402 #endif
403 pop_CPU_state();
404 mov(rsp, rbp);
405 pop(rbp);
406 }
407
408 void MacroAssembler::print_state() {
409 address rip = pc();
410 pusha(); // get regs on stack
411 push(rbp);
412 movq(rbp, rsp);
413 andq(rsp, -16); // align stack as required by push_CPU_state and call
414 push_CPU_state(); // keeps alignment at 16 bytes
415
416 lea(c_rarg0, InternalAddress(rip));
417 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
418 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
419
420 pop_CPU_state();
421 mov(rsp, rbp);
422 pop(rbp);
423 popa();
424 }
425
426 #ifndef PRODUCT
427 extern "C" void findpc(intptr_t x);
428 #endif
429
430 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
431 // In order to get locks to work, we need to fake an in_VM state
432 if (ShowMessageBoxOnError) {
433 JavaThread* thread = JavaThread::current();
434 JavaThreadState saved_state = thread->thread_state();
435 thread->set_thread_state(_thread_in_vm);
436 #ifndef PRODUCT
437 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
438 ttyLocker ttyl;
439 BytecodeCounter::print();
440 }
441 #endif
442 // To see where a verify_oop failed, get $ebx+40/X for this frame.
443 // XXX correct this offset for amd64
444 // This is the value of eip which points to where verify_oop will return.
445 if (os::message_box(msg, "Execution stopped, print registers?")) {
446 print_state64(pc, regs);
447 BREAKPOINT;
448 }
449 }
450 fatal("DEBUG MESSAGE: %s", msg);
451 }
452
453 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
454 ttyLocker ttyl;
455 DebuggingContext debugging{};
456 tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
457 #ifndef PRODUCT
458 tty->cr();
459 findpc(pc);
460 tty->cr();
461 #endif
462 #define PRINT_REG(rax, value) \
463 { tty->print("%s = ", #rax); os::print_location(tty, value); }
464 PRINT_REG(rax, regs[15]);
465 PRINT_REG(rbx, regs[12]);
466 PRINT_REG(rcx, regs[14]);
467 PRINT_REG(rdx, regs[13]);
468 PRINT_REG(rdi, regs[8]);
469 PRINT_REG(rsi, regs[9]);
470 PRINT_REG(rbp, regs[10]);
471 // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
472 PRINT_REG(rsp, (intptr_t)(&regs[16]));
473 PRINT_REG(r8 , regs[7]);
474 PRINT_REG(r9 , regs[6]);
475 PRINT_REG(r10, regs[5]);
476 PRINT_REG(r11, regs[4]);
477 PRINT_REG(r12, regs[3]);
478 PRINT_REG(r13, regs[2]);
479 PRINT_REG(r14, regs[1]);
480 PRINT_REG(r15, regs[0]);
481 #undef PRINT_REG
482 // Print some words near the top of the stack.
483 int64_t* rsp = &regs[16];
484 int64_t* dump_sp = rsp;
485 for (int col1 = 0; col1 < 8; col1++) {
486 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
487 os::print_location(tty, *dump_sp++);
488 }
489 for (int row = 0; row < 25; row++) {
490 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
491 for (int col = 0; col < 4; col++) {
492 tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
493 }
494 tty->cr();
495 }
496 // Print some instructions around pc:
497 Disassembler::decode((address)pc-64, (address)pc);
498 tty->print_cr("--------");
499 Disassembler::decode((address)pc, (address)pc+32);
500 }
501
502 // The java_calling_convention describes stack locations as ideal slots on
503 // a frame with no abi restrictions. Since we must observe abi restrictions
504 // (like the placement of the register window) the slots must be biased by
505 // the following value.
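// With 4-byte stack slots, the "+ 4" below skips the 16 bytes occupied by the
// saved rbp and the return address of the current frame.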
506 static int reg2offset_in(VMReg r) {
507 // Account for saved rbp and return address
508 // This should really be in_preserve_stack_slots
509 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
510 }
511
512 static int reg2offset_out(VMReg r) {
513 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
514 }
515
516 // A long move
517 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
518
519 // The calling convention assures us that each VMRegPair is either
520 // a single physical register or a pair of adjacent stack slots.
521
522 if (src.is_single_phys_reg() ) {
523 if (dst.is_single_phys_reg()) {
524 if (dst.first() != src.first()) {
525 mov(dst.first()->as_Register(), src.first()->as_Register());
526 }
527 } else {
528 assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
529 src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
530 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
531 }
532 } else if (dst.is_single_phys_reg()) {
533 assert(src.is_single_reg(), "not a stack pair");
534 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
535 } else {
536 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
537 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
538 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
539 }
540 }
541
542 // A double move
543 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
544
545 // The calling convention assures us that each VMRegPair is either
546 // a single physical register or a pair of adjacent stack slots.
547
548 if (src.is_single_phys_reg() ) {
549 if (dst.is_single_phys_reg()) {
550 // In theory these overlap but the ordering is such that this is likely a nop
551 if ( src.first() != dst.first()) {
552 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
553 }
554 } else {
555 assert(dst.is_single_reg(), "not a stack pair");
556 movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
557 }
558 } else if (dst.is_single_phys_reg()) {
559 assert(src.is_single_reg(), "not a stack pair");
560 movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
561 } else {
562 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
563 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
564 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
565 }
566 }
567
568
569 // A float arg may have to do float reg to int reg conversion
570 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
571 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
572
573 // The calling convention assures us that each VMRegPair is either
574 // a single physical register or a pair of adjacent stack slots.
575
576 if (src.first()->is_stack()) {
577 if (dst.first()->is_stack()) {
578 movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
579 movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
580 } else {
581 // stack to reg
582 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
583 movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
584 }
585 } else if (dst.first()->is_stack()) {
586 // reg to stack
587 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
588 movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
589 } else {
590 // reg to reg
591 // In theory these overlap but the ordering is such that this is likely a nop
592 if ( src.first() != dst.first()) {
593 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
594 }
595 }
596 }
597
598 // On 64 bit we will store integer-like items to the stack as
599 // 64-bit items (x86_32/64 ABI) even though Java would only store
600 // 32 bits for a parameter. On 32 bit it would simply be 32 bits,
601 // so this routine does 32->32 on 32 bit and 32->64 on 64 bit.
602 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
603 if (src.first()->is_stack()) {
604 if (dst.first()->is_stack()) {
605 // stack to stack
606 movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
607 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
608 } else {
609 // stack to reg
610 movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
611 }
612 } else if (dst.first()->is_stack()) {
613 // reg to stack
614 // Do we really have to sign extend???
615 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
616 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
617 } else {
618 // Do we really have to sign extend???
619 // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
620 if (dst.first() != src.first()) {
621 movq(dst.first()->as_Register(), src.first()->as_Register());
622 }
623 }
624 }
625
626 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
627 if (src.first()->is_stack()) {
628 if (dst.first()->is_stack()) {
629 // stack to stack
630 movq(rax, Address(rbp, reg2offset_in(src.first())));
631 movq(Address(rsp, reg2offset_out(dst.first())), rax);
632 } else {
633 // stack to reg
634 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
635 }
636 } else if (dst.first()->is_stack()) {
637 // reg to stack
638 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
639 } else {
640 if (dst.first() != src.first()) {
641 movq(dst.first()->as_Register(), src.first()->as_Register());
642 }
643 }
644 }
645
646 // An oop arg. Must pass a handle not the oop itself
647 void MacroAssembler::object_move(OopMap* map,
648 int oop_handle_offset,
649 int framesize_in_slots,
650 VMRegPair src,
651 VMRegPair dst,
652 bool is_receiver,
653 int* receiver_offset) {
654
655 // must pass a handle. First figure out the location we use as a handle
656
657 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
658
659 // See if the oop is null; if it is, we need no handle
660
661 if (src.first()->is_stack()) {
662
663 // Oop is already on the stack as an argument
664 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
665 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
666 if (is_receiver) {
667 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
668 }
669
670 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
671 lea(rHandle, Address(rbp, reg2offset_in(src.first())));
672 // conditionally move a null
673 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
674 } else {
675
676 // Oop is in a register; we must store it to the space we reserve
677 // on the stack for oop_handles, and pass a handle if the oop is non-null
678
679 const Register rOop = src.first()->as_Register();
680 int oop_slot;
681 if (rOop == j_rarg0)
682 oop_slot = 0;
683 else if (rOop == j_rarg1)
684 oop_slot = 1;
685 else if (rOop == j_rarg2)
686 oop_slot = 2;
687 else if (rOop == j_rarg3)
688 oop_slot = 3;
689 else if (rOop == j_rarg4)
690 oop_slot = 4;
691 else {
692 assert(rOop == j_rarg5, "wrong register");
693 oop_slot = 5;
694 }
695
696 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
697 int offset = oop_slot*VMRegImpl::stack_slot_size;
698
699 map->set_oop(VMRegImpl::stack2reg(oop_slot));
700 // Store oop in handle area, may be null
701 movptr(Address(rsp, offset), rOop);
702 if (is_receiver) {
703 *receiver_offset = offset;
704 }
705
706 cmpptr(rOop, NULL_WORD);
707 lea(rHandle, Address(rsp, offset));
708 // conditionally move a null from the handle area where it was just stored
709 cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
710 }
711
712 // If the arg goes to the stack then store the handle there, otherwise it is already in the correct reg.
713 if (dst.first()->is_stack()) {
714 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
715 }
716 }
717
718 void MacroAssembler::addptr(Register dst, int32_t imm32) {
719 addq(dst, imm32);
720 }
721
722 void MacroAssembler::addptr(Register dst, Register src) {
723 addq(dst, src);
724 }
725
726 void MacroAssembler::addptr(Address dst, Register src) {
727 addq(dst, src);
728 }
729
730 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
731 assert(rscratch != noreg || always_reachable(src), "missing");
732
733 if (reachable(src)) {
734 Assembler::addsd(dst, as_Address(src));
735 } else {
736 lea(rscratch, src);
737 Assembler::addsd(dst, Address(rscratch, 0));
738 }
739 }
740
741 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
742 assert(rscratch != noreg || always_reachable(src), "missing");
743
744 if (reachable(src)) {
745 addss(dst, as_Address(src));
746 } else {
747 lea(rscratch, src);
748 addss(dst, Address(rscratch, 0));
749 }
750 }
751
752 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
753 assert(rscratch != noreg || always_reachable(src), "missing");
754
755 if (reachable(src)) {
756 Assembler::addpd(dst, as_Address(src));
757 } else {
758 lea(rscratch, src);
759 Assembler::addpd(dst, Address(rscratch, 0));
760 }
761 }
762
763 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
764 // Stub code is generated once and never copied.
765 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
766 void MacroAssembler::align64() {
767 align(64, (uint)(uintptr_t)pc());
768 }
769
770 void MacroAssembler::align32() {
771 align(32, (uint)(uintptr_t)pc());
772 }
773
774 void MacroAssembler::align(uint modulus) {
775 // 8273459: Ensure alignment is possible with current segment alignment
776 assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
777 align(modulus, offset());
778 }
779
780 void MacroAssembler::align(uint modulus, uint target) {
781 if (target % modulus != 0) {
782 nop(modulus - (target % modulus));
783 }
784 }
785
786 void MacroAssembler::push_f(XMMRegister r) {
787 subptr(rsp, wordSize);
788 movflt(Address(rsp, 0), r);
789 }
790
791 void MacroAssembler::pop_f(XMMRegister r) {
792 movflt(r, Address(rsp, 0));
793 addptr(rsp, wordSize);
794 }
795
796 void MacroAssembler::push_d(XMMRegister r) {
797 subptr(rsp, 2 * wordSize);
798 movdbl(Address(rsp, 0), r);
799 }
800
801 void MacroAssembler::pop_d(XMMRegister r) {
802 movdbl(r, Address(rsp, 0));
803 addptr(rsp, 2 * Interpreter::stackElementSize);
804 }
805
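// push/pop variants that use the APX pushp/popp encodings when the CPU
// supports APX, falling back to the ordinary push/pop otherwise.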
806 void MacroAssembler::push_ppx(Register src) {
807 if (VM_Version::supports_apx_f()) {
808 pushp(src);
809 } else {
810 Assembler::push(src);
811 }
812 }
813
814 void MacroAssembler::pop_ppx(Register dst) {
815 if (VM_Version::supports_apx_f()) {
816 popp(dst);
817 } else {
818 Assembler::pop(dst);
819 }
820 }
821
822 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
823 // Used in sign-masking with aligned address.
824 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
825 assert(rscratch != noreg || always_reachable(src), "missing");
826
827 if (UseAVX > 2 &&
828 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
829 (dst->encoding() >= 16)) {
830 vpand(dst, dst, src, AVX_512bit, rscratch);
831 } else if (reachable(src)) {
832 Assembler::andpd(dst, as_Address(src));
833 } else {
834 lea(rscratch, src);
835 Assembler::andpd(dst, Address(rscratch, 0));
836 }
837 }
838
839 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
840 // Used in sign-masking with aligned address.
841 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
842 assert(rscratch != noreg || always_reachable(src), "missing");
843
844 if (reachable(src)) {
845 Assembler::andps(dst, as_Address(src));
846 } else {
847 lea(rscratch, src);
848 Assembler::andps(dst, Address(rscratch, 0));
849 }
850 }
851
852 void MacroAssembler::andptr(Register dst, int32_t imm32) {
853 andq(dst, imm32);
854 }
855
856 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
857 assert(rscratch != noreg || always_reachable(src), "missing");
858
859 if (reachable(src)) {
860 andq(dst, as_Address(src));
861 } else {
862 lea(rscratch, src);
863 andq(dst, Address(rscratch, 0));
864 }
865 }
866
867 void MacroAssembler::atomic_incl(Address counter_addr) {
868 lock();
869 incrementl(counter_addr);
870 }
871
872 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
873 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
874
875 if (reachable(counter_addr)) {
876 atomic_incl(as_Address(counter_addr));
877 } else {
878 lea(rscratch, counter_addr);
879 atomic_incl(Address(rscratch, 0));
880 }
881 }
882
883 void MacroAssembler::atomic_incq(Address counter_addr) {
884 lock();
885 incrementq(counter_addr);
886 }
887
888 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
889 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
890
891 if (reachable(counter_addr)) {
892 atomic_incq(as_Address(counter_addr));
893 } else {
894 lea(rscratch, counter_addr);
895 atomic_incq(Address(rscratch, 0));
896 }
897 }
898
899 // Writes to successive stack pages until the given offset is reached, to check for
900 // stack overflow + shadow pages. This clobbers tmp.
901 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
902 movptr(tmp, rsp);
903 // Bang stack for total size given plus shadow page size.
904 // Bang one page at a time because large size can bang beyond yellow and
905 // red zones.
906 Label loop;
907 bind(loop);
908 movl(Address(tmp, (-(int)os::vm_page_size())), size );
909 subptr(tmp, (int)os::vm_page_size());
910 subl(size, (int)os::vm_page_size());
911 jcc(Assembler::greater, loop);
912
913 // Bang down shadow pages too.
914 // At this point, (tmp-0) is the last address touched, so don't
915 // touch it again. (It was touched as (tmp-pagesize) but then tmp
916 // was post-decremented.) Skip this address by starting at i=1, and
917 // touch a few more pages below. N.B. It is important to touch all
918 // the way down including all pages in the shadow zone.
919 for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
920 // this could be any sized move but this can be a debugging crumb
921 // so the bigger the better.
922 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
923 }
924 }
925
926 void MacroAssembler::reserved_stack_check() {
927 // testing if reserved zone needs to be enabled
928 Label no_reserved_zone_enabling;
929
930 cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
931 jcc(Assembler::below, no_reserved_zone_enabling);
932
933 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
934 jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
935 should_not_reach_here();
936
937 bind(no_reserved_zone_enabling);
938 }
939
940 void MacroAssembler::c2bool(Register x) {
941 // implements x == 0 ? 0 : 1
942 // note: must only look at least-significant byte of x
943 // since C-style booleans are stored in one byte
944 // only! (was bug)
945 andl(x, 0xFF);
946 setb(Assembler::notZero, x);
947 }
948
949 // Wouldn't need if AddressLiteral version had new name
950 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
951 Assembler::call(L, rtype);
952 }
953
954 void MacroAssembler::call(Register entry) {
955 Assembler::call(entry);
956 }
957
958 void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
959 assert(rscratch != noreg || always_reachable(entry), "missing");
960
961 if (reachable(entry)) {
962 Assembler::call_literal(entry.target(), entry.rspec());
963 } else {
964 lea(rscratch, entry);
965 Assembler::call(rscratch);
966 }
967 }
968
969 void MacroAssembler::ic_call(address entry, jint method_index) {
970 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
971 // Needs full 64-bit immediate for later patching.
972 mov64(rax, (int64_t)Universe::non_oop_word());
973 call(AddressLiteral(entry, rh));
974 }
975
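// Worst-case size in bytes of the inline cache check emitted by ic_check();
// used there to pad so that the verified entry point that follows the check
// ends up properly aligned.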
976 int MacroAssembler::ic_check_size() {
977 return UseCompactObjectHeaders ? 17 : 14;
978 }
979
980 int MacroAssembler::ic_check(int end_alignment) {
981 Register receiver = j_rarg0;
982 Register data = rax;
983 Register temp = rscratch1;
984
985 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
986 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
987 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
988 // before the inline cache check here, and not after
989 align(end_alignment, offset() + ic_check_size());
990
991 int uep_offset = offset();
992
993 if (UseCompactObjectHeaders) {
994 load_narrow_klass_compact(temp, receiver);
995 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
996 } else if (UseCompressedClassPointers) {
997 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
998 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
999 } else {
1000 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1001 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
1002 }
1003
1004 // if inline cache check fails, then jump to runtime routine
1005 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1006 assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
1007
1008 return uep_offset;
1009 }
1010
1011 void MacroAssembler::emit_static_call_stub() {
1012 // Static stub relocation also tags the Method* in the code-stream.
1013 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
1014 // This is recognized as unresolved by relocs/nativeinst/ic code.
1015 jump(RuntimeAddress(pc()));
1016 }
1017
1018 // Implementation of call_VM versions
1019
1020 void MacroAssembler::call_VM(Register oop_result,
1021 address entry_point,
1022 bool check_exceptions) {
1023 Label C, E;
1024 call(C, relocInfo::none);
1025 jmp(E);
1026
1027 bind(C);
1028 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1029 ret(0);
1030
1031 bind(E);
1032 }
1033
1034 void MacroAssembler::call_VM(Register oop_result,
1035 address entry_point,
1036 Register arg_1,
1037 bool check_exceptions) {
1038 Label C, E;
1039 call(C, relocInfo::none);
1040 jmp(E);
1041
1042 bind(C);
1043 pass_arg1(this, arg_1);
1044 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1045 ret(0);
1046
1047 bind(E);
1048 }
1049
1050 void MacroAssembler::call_VM(Register oop_result,
1051 address entry_point,
1052 Register arg_1,
1053 Register arg_2,
1054 bool check_exceptions) {
1055 Label C, E;
1056 call(C, relocInfo::none);
1057 jmp(E);
1058
1059 bind(C);
1060
1061 assert_different_registers(arg_1, c_rarg2);
1062
1063 pass_arg2(this, arg_2);
1064 pass_arg1(this, arg_1);
1065 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1066 ret(0);
1067
1068 bind(E);
1069 }
1070
1071 void MacroAssembler::call_VM(Register oop_result,
1072 address entry_point,
1073 Register arg_1,
1074 Register arg_2,
1075 Register arg_3,
1076 bool check_exceptions) {
1077 Label C, E;
1078 call(C, relocInfo::none);
1079 jmp(E);
1080
1081 bind(C);
1082
1083 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1084 assert_different_registers(arg_2, c_rarg3);
1085 pass_arg3(this, arg_3);
1086 pass_arg2(this, arg_2);
1087 pass_arg1(this, arg_1);
1088 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1089 ret(0);
1090
1091 bind(E);
1092 }
1093
1094 void MacroAssembler::call_VM(Register oop_result,
1095 Register last_java_sp,
1096 address entry_point,
1097 int number_of_arguments,
1098 bool check_exceptions) {
1099 call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1100 }
1101
1102 void MacroAssembler::call_VM(Register oop_result,
1103 Register last_java_sp,
1104 address entry_point,
1105 Register arg_1,
1106 bool check_exceptions) {
1107 pass_arg1(this, arg_1);
1108 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1109 }
1110
1111 void MacroAssembler::call_VM(Register oop_result,
1112 Register last_java_sp,
1113 address entry_point,
1114 Register arg_1,
1115 Register arg_2,
1116 bool check_exceptions) {
1117
1118 assert_different_registers(arg_1, c_rarg2);
1119 pass_arg2(this, arg_2);
1120 pass_arg1(this, arg_1);
1121 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1122 }
1123
1124 void MacroAssembler::call_VM(Register oop_result,
1125 Register last_java_sp,
1126 address entry_point,
1127 Register arg_1,
1128 Register arg_2,
1129 Register arg_3,
1130 bool check_exceptions) {
1131 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1132 assert_different_registers(arg_2, c_rarg3);
1133 pass_arg3(this, arg_3);
1134 pass_arg2(this, arg_2);
1135 pass_arg1(this, arg_1);
1136 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1137 }
1138
1139 void MacroAssembler::super_call_VM(Register oop_result,
1140 Register last_java_sp,
1141 address entry_point,
1142 int number_of_arguments,
1143 bool check_exceptions) {
1144 MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1145 }
1146
1147 void MacroAssembler::super_call_VM(Register oop_result,
1148 Register last_java_sp,
1149 address entry_point,
1150 Register arg_1,
1151 bool check_exceptions) {
1152 pass_arg1(this, arg_1);
1153 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1154 }
1155
1156 void MacroAssembler::super_call_VM(Register oop_result,
1157 Register last_java_sp,
1158 address entry_point,
1159 Register arg_1,
1160 Register arg_2,
1161 bool check_exceptions) {
1162
1163 assert_different_registers(arg_1, c_rarg2);
1164 pass_arg2(this, arg_2);
1165 pass_arg1(this, arg_1);
1166 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1167 }
1168
1169 void MacroAssembler::super_call_VM(Register oop_result,
1170 Register last_java_sp,
1171 address entry_point,
1172 Register arg_1,
1173 Register arg_2,
1174 Register arg_3,
1175 bool check_exceptions) {
1176 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1177 assert_different_registers(arg_2, c_rarg3);
1178 pass_arg3(this, arg_3);
1179 pass_arg2(this, arg_2);
1180 pass_arg1(this, arg_1);
1181 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1182 }
1183
1184 void MacroAssembler::call_VM_base(Register oop_result,
1185 Register last_java_sp,
1186 address entry_point,
1187 int number_of_arguments,
1188 bool check_exceptions) {
1189 Register java_thread = r15_thread;
1190
1191 // determine last_java_sp register
1192 if (!last_java_sp->is_valid()) {
1193 last_java_sp = rsp;
1194 }
1195 // debugging support
1196 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1197 #ifdef ASSERT
1198 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
1199 // r12 is the heapbase.
1200 if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
1201 #endif // ASSERT
1202
1203 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
1204 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
1205
1206 // push java thread (becomes first argument of C function)
1207
1208 mov(c_rarg0, r15_thread);
1209
1210 // set last Java frame before call
1211 assert(last_java_sp != rbp, "can't use ebp/rbp");
1212
1213 // Only interpreter should have to set fp
1214 set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);
1215
1216 // do the call, remove parameters
1217 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
1218
1219 #ifdef ASSERT
1220 // Check that thread register is not clobbered.
1221 guarantee(java_thread != rax, "change this code");
1222 push(rax);
1223 { Label L;
1224 get_thread_slow(rax);
1225 cmpptr(java_thread, rax);
1226 jcc(Assembler::equal, L);
1227 STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
1228 bind(L);
1229 }
1230 pop(rax);
1231 #endif
1232
1233 // reset last Java frame
1234 // Only interpreter should have to clear fp
1235 reset_last_Java_frame(true);
1236
1237 // C++ interp handles this in the interpreter
1238 check_and_handle_popframe();
1239 check_and_handle_earlyret();
1240
1241 if (check_exceptions) {
1242 // check for pending exceptions (java_thread is set upon return)
1243 cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1244 // This used to conditionally jump to forward_exception however it is
1245 // possible if we relocate that the branch will not reach. So we must jump
1246 // around so we can always reach
1247
1248 Label ok;
1249 jcc(Assembler::equal, ok);
1250 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1251 bind(ok);
1252 }
1253
1254 // get oop result if there is one and reset the value in the thread
1255 if (oop_result->is_valid()) {
1256 get_vm_result_oop(oop_result);
1257 }
1258 }
1259
1260 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
1261 // Calculating the value for last_Java_sp is somewhat subtle.
1262 // call_VM does an intermediate call which places a return address on
1263 // the stack just under the stack pointer as the user finished with it.
1264 // This allows us to retrieve last_Java_pc from last_Java_sp[-1].
1265
1266 // We've pushed one address, correct last_Java_sp
1267 lea(rax, Address(rsp, wordSize));
1268
1269 call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
1270 }
1271
1272 // Use this method when the MacroAssembler version of call_VM_leaf_base() should be called from the Interpreter.
1273 void MacroAssembler::call_VM_leaf0(address entry_point) {
1274 MacroAssembler::call_VM_leaf_base(entry_point, 0);
1275 }
1276
1277 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1278 call_VM_leaf_base(entry_point, number_of_arguments);
1279 }
1280
1281 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1282 pass_arg0(this, arg_0);
1283 call_VM_leaf(entry_point, 1);
1284 }
1285
1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1287
1288 assert_different_registers(arg_0, c_rarg1);
1289 pass_arg1(this, arg_1);
1290 pass_arg0(this, arg_0);
1291 call_VM_leaf(entry_point, 2);
1292 }
1293
1294 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1295 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1296 assert_different_registers(arg_1, c_rarg2);
1297 pass_arg2(this, arg_2);
1298 pass_arg1(this, arg_1);
1299 pass_arg0(this, arg_0);
1300 call_VM_leaf(entry_point, 3);
1301 }
1302
1303 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1304 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
1305 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1306 assert_different_registers(arg_2, c_rarg3);
1307 pass_arg3(this, arg_3);
1308 pass_arg2(this, arg_2);
1309 pass_arg1(this, arg_1);
1310 pass_arg0(this, arg_0);
1311 call_VM_leaf(entry_point, 4);
1312 }
1313
1314 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1315 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1316 }
1317
1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1319 pass_arg0(this, arg_0);
1320 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1321 }
1322
1323 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1324 assert_different_registers(arg_0, c_rarg1);
1325 pass_arg1(this, arg_1);
1326 pass_arg0(this, arg_0);
1327 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1328 }
1329
1330 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1331 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1332 assert_different_registers(arg_1, c_rarg2);
1333 pass_arg2(this, arg_2);
1334 pass_arg1(this, arg_1);
1335 pass_arg0(this, arg_0);
1336 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1337 }
1338
1339 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1340 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
1341 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1342 assert_different_registers(arg_2, c_rarg3);
1343 pass_arg3(this, arg_3);
1344 pass_arg2(this, arg_2);
1345 pass_arg1(this, arg_1);
1346 pass_arg0(this, arg_0);
1347 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1348 }
1349
1350 void MacroAssembler::get_vm_result_oop(Register oop_result) {
1351 movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
1352 movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
1353 verify_oop_msg(oop_result, "broken oop in call_VM_base");
1354 }
1355
1356 void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
1357 movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
1358 movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
1359 }
1360
1361 void MacroAssembler::check_and_handle_earlyret() {
1362 }
1363
1364 void MacroAssembler::check_and_handle_popframe() {
1365 }
1366
1367 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
1368 assert(rscratch != noreg || always_reachable(src1), "missing");
1369
1370 if (reachable(src1)) {
1371 cmpl(as_Address(src1), imm);
1372 } else {
1373 lea(rscratch, src1);
1374 cmpl(Address(rscratch, 0), imm);
1375 }
1376 }
1377
1378 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
1379 assert(!src2.is_lval(), "use cmpptr");
1380 assert(rscratch != noreg || always_reachable(src2), "missing");
1381
1382 if (reachable(src2)) {
1383 cmpl(src1, as_Address(src2));
1384 } else {
1385 lea(rscratch, src2);
1386 cmpl(src1, Address(rscratch, 0));
1387 }
1388 }
1389
1390 void MacroAssembler::cmp32(Register src1, int32_t imm) {
1391 Assembler::cmpl(src1, imm);
1392 }
1393
1394 void MacroAssembler::cmp32(Register src1, Address src2) {
1395 Assembler::cmpl(src1, src2);
1396 }
1397
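// Materialize the result of a floating-point compare as an integer in dst:
// -1 if less, 0 if equal, +1 if greater. An unordered result (NaN operand)
// maps to -1 or +1 depending on unordered_is_less, which corresponds to the
// Java fcmpl/fcmpg and dcmpl/dcmpg bytecode semantics.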
1398 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1399 ucomisd(opr1, opr2);
1400
1401 Label L;
1402 if (unordered_is_less) {
1403 movl(dst, -1);
1404 jcc(Assembler::parity, L);
1405 jcc(Assembler::below , L);
1406 movl(dst, 0);
1407 jcc(Assembler::equal , L);
1408 increment(dst);
1409 } else { // unordered is greater
1410 movl(dst, 1);
1411 jcc(Assembler::parity, L);
1412 jcc(Assembler::above , L);
1413 movl(dst, 0);
1414 jcc(Assembler::equal , L);
1415 decrementl(dst);
1416 }
1417 bind(L);
1418 }
1419
1420 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1421 ucomiss(opr1, opr2);
1422
1423 Label L;
1424 if (unordered_is_less) {
1425 movl(dst, -1);
1426 jcc(Assembler::parity, L);
1427 jcc(Assembler::below , L);
1428 movl(dst, 0);
1429 jcc(Assembler::equal , L);
1430 increment(dst);
1431 } else { // unordered is greater
1432 movl(dst, 1);
1433 jcc(Assembler::parity, L);
1434 jcc(Assembler::above , L);
1435 movl(dst, 0);
1436 jcc(Assembler::equal , L);
1437 decrementl(dst);
1438 }
1439 bind(L);
1440 }
1441
1442
1443 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
1444 assert(rscratch != noreg || always_reachable(src1), "missing");
1445
1446 if (reachable(src1)) {
1447 cmpb(as_Address(src1), imm);
1448 } else {
1449 lea(rscratch, src1);
1450 cmpb(Address(rscratch, 0), imm);
1451 }
1452 }
1453
1454 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
1455 assert(rscratch != noreg || always_reachable(src2), "missing");
1456
1457 if (src2.is_lval()) {
1458 movptr(rscratch, src2);
1459 Assembler::cmpq(src1, rscratch);
1460 } else if (reachable(src2)) {
1461 cmpq(src1, as_Address(src2));
1462 } else {
1463 lea(rscratch, src2);
1464 Assembler::cmpq(src1, Address(rscratch, 0));
1465 }
1466 }
1467
1468 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
1469 assert(src2.is_lval(), "not a mem-mem compare");
1470 // moves src2's literal address
1471 movptr(rscratch, src2);
1472 Assembler::cmpq(src1, rscratch);
1473 }
1474
1475 void MacroAssembler::cmpoop(Register src1, Register src2) {
1476 cmpptr(src1, src2);
1477 }
1478
1479 void MacroAssembler::cmpoop(Register src1, Address src2) {
1480 cmpptr(src1, src2);
1481 }
1482
1483 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
1484 movoop(rscratch, src2);
1485 cmpptr(src1, rscratch);
1486 }
1487
1488 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
1489 assert(rscratch != noreg || always_reachable(adr), "missing");
1490
1491 if (reachable(adr)) {
1492 lock();
1493 cmpxchgptr(reg, as_Address(adr));
1494 } else {
1495 lea(rscratch, adr);
1496 lock();
1497 cmpxchgptr(reg, Address(rscratch, 0));
1498 }
1499 }
1500
1501 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
1502 cmpxchgq(reg, adr);
1503 }
1504
1505 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1506 assert(rscratch != noreg || always_reachable(src), "missing");
1507
1508 if (reachable(src)) {
1509 Assembler::comisd(dst, as_Address(src));
1510 } else {
1511 lea(rscratch, src);
1512 Assembler::comisd(dst, Address(rscratch, 0));
1513 }
1514 }
1515
1516 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
1517 assert(rscratch != noreg || always_reachable(src), "missing");
1518
1519 if (reachable(src)) {
1520 Assembler::comiss(dst, as_Address(src));
1521 } else {
1522 lea(rscratch, src);
1523 Assembler::comiss(dst, Address(rscratch, 0));
1524 }
1525 }
1526
1527
1528 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
1529 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
1530
1531 Condition negated_cond = negate_condition(cond);
1532 Label L;
1533 jcc(negated_cond, L);
1534 pushf(); // Preserve flags
1535 atomic_incl(counter_addr, rscratch);
1536 popf();
1537 bind(L);
1538 }
1539
1540 int MacroAssembler::corrected_idivl(Register reg) {
1541 // Full implementation of Java idiv and irem; checks for
1542 // special case as described in JVM spec., p.243 & p.271.
1543 // The function returns the (pc) offset of the idivl
1544 // instruction - may be needed for implicit exceptions.
1545 //
1546 // normal case special case
1547 //
1548 // input : rax: dividend                    min_int
1549 //         reg: divisor (may not be rax/rdx) -1
1550 //
1551 // output: rax: quotient  (= rax idiv reg)   min_int
1552 //         rdx: remainder (= rax irem reg)   0
1553 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
1554 const int min_int = 0x80000000;
1555 Label normal_case, special_case;
1556
1557 // check for special case
1558 cmpl(rax, min_int);
1559 jcc(Assembler::notEqual, normal_case);
1560 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
1561 cmpl(reg, -1);
1562 jcc(Assembler::equal, special_case);
1563
1564 // handle normal case
1565 bind(normal_case);
1566 cdql();
1567 int idivl_offset = offset();
1568 idivl(reg);
1569
1570 // normal and special case exit
1571 bind(special_case);
1572
1573 return idivl_offset;
1574 }
1575
1576
1577
1578 void MacroAssembler::decrementl(Register reg, int value) {
1579 if (value == min_jint) {subl(reg, value) ; return; }
1580 if (value < 0) { incrementl(reg, -value); return; }
1581 if (value == 0) { ; return; }
1582 if (value == 1 && UseIncDec) { decl(reg) ; return; }
1583 /* else */ { subl(reg, value) ; return; }
1584 }
1585
1586 void MacroAssembler::decrementl(Address dst, int value) {
1587 if (value == min_jint) {subl(dst, value) ; return; }
1588 if (value < 0) { incrementl(dst, -value); return; }
1589 if (value == 0) { ; return; }
1590 if (value == 1 && UseIncDec) { decl(dst) ; return; }
1591 /* else */ { subl(dst, value) ; return; }
1592 }
1593
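// Signed division of reg by 2^shift_value via an arithmetic shift. For a
// negative dividend, (2^shift_value - 1) is added first so the result rounds
// toward zero (as Java's integer division does) rather than toward negative
// infinity.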
1594 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
1595 assert(shift_value > 0, "illegal shift value");
1596 Label _is_positive;
1597 testl (reg, reg);
1598 jcc (Assembler::positive, _is_positive);
1599 int offset = (1 << shift_value) - 1 ;
1600
1601 if (offset == 1) {
1602 incrementl(reg);
1603 } else {
1604 addl(reg, offset);
1605 }
1606
1607 bind (_is_positive);
1608 sarl(reg, shift_value);
1609 }
1610
1611 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1612 assert(rscratch != noreg || always_reachable(src), "missing");
1613
1614 if (reachable(src)) {
1615 Assembler::divsd(dst, as_Address(src));
1616 } else {
1617 lea(rscratch, src);
1618 Assembler::divsd(dst, Address(rscratch, 0));
1619 }
1620 }
1621
1622 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
1623 assert(rscratch != noreg || always_reachable(src), "missing");
1624
1625 if (reachable(src)) {
1626 Assembler::divss(dst, as_Address(src));
1627 } else {
1628 lea(rscratch, src);
1629 Assembler::divss(dst, Address(rscratch, 0));
1630 }
1631 }
1632
1633 void MacroAssembler::enter() {
1634 push(rbp);
1635 mov(rbp, rsp);
1636 }
1637
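// Emit an 8-byte nop (0F 1F 84 00 imm32) after call sites when continuations
// are enabled. The relocation marks the spot for the runtime, and the 32-bit
// immediate leaves room for data identifying the call site to be patched in
// later.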
1638 void MacroAssembler::post_call_nop() {
1639 if (!Continuations::enabled()) {
1640 return;
1641 }
1642 InstructionMark im(this);
1643 relocate(post_call_nop_Relocation::spec());
1644 InlineSkippedInstructionsCounter skipCounter(this);
1645 emit_int8((uint8_t)0x0f);
1646 emit_int8((uint8_t)0x1f);
1647 emit_int8((uint8_t)0x84);
1648 emit_int8((uint8_t)0x00);
1649 emit_int32(0x00);
1650 }
1651
1652 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1653 assert(rscratch != noreg || always_reachable(src), "missing");
1654 if (reachable(src)) {
1655 Assembler::mulpd(dst, as_Address(src));
1656 } else {
1657 lea(rscratch, src);
1658 Assembler::mulpd(dst, Address(rscratch, 0));
1659 }
1660 }
1661
1662 // dst = c = a * b + c
1663 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
1664 Assembler::vfmadd231sd(c, a, b);
1665 if (dst != c) {
1666 movdbl(dst, c);
1667 }
1668 }
1669
1670 // dst = c = a * b + c
1671 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
1672 Assembler::vfmadd231ss(c, a, b);
1673 if (dst != c) {
1674 movflt(dst, c);
1675 }
1676 }
1677
1678 // dst = c = a * b + c
1679 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
1680 Assembler::vfmadd231pd(c, a, b, vector_len);
1681 if (dst != c) {
1682 vmovdqu(dst, c);
1683 }
1684 }
1685
1686 // dst = c = a * b + c
1687 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
1688 Assembler::vfmadd231ps(c, a, b, vector_len);
1689 if (dst != c) {
1690 vmovdqu(dst, c);
1691 }
1692 }
1693
1694 // dst = c = a * b + c
1695 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
1696 Assembler::vfmadd231pd(c, a, b, vector_len);
1697 if (dst != c) {
1698 vmovdqu(dst, c);
1699 }
1700 }
1701
1702 // dst = c = a * b + c
1703 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
1704 Assembler::vfmadd231ps(c, a, b, vector_len);
1705 if (dst != c) {
1706 vmovdqu(dst, c);
1707 }
1708 }
1709
1710 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
1711 assert(rscratch != noreg || always_reachable(dst), "missing");
1712
1713 if (reachable(dst)) {
1714 incrementl(as_Address(dst));
1715 } else {
1716 lea(rscratch, dst);
1717 incrementl(Address(rscratch, 0));
1718 }
1719 }
1720
1721 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
1722 incrementl(as_Address(dst, rscratch));
1723 }
1724
1725 void MacroAssembler::incrementl(Register reg, int value) {
1726 if (value == min_jint) {addl(reg, value) ; return; }
1727 if (value < 0) { decrementl(reg, -value); return; }
1728 if (value == 0) { ; return; }
1729 if (value == 1 && UseIncDec) { incl(reg) ; return; }
1730 /* else */ { addl(reg, value) ; return; }
1731 }
1732
1733 void MacroAssembler::incrementl(Address dst, int value) {
1734 if (value == min_jint) {addl(dst, value) ; return; }
1735 if (value < 0) { decrementl(dst, -value); return; }
1736 if (value == 0) { ; return; }
1737 if (value == 1 && UseIncDec) { incl(dst) ; return; }
1738 /* else */ { addl(dst, value) ; return; }
1739 }
1740
1741 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
1742 assert(rscratch != noreg || always_reachable(dst), "missing");
1743 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
1744 if (reachable(dst)) {
1745 jmp_literal(dst.target(), dst.rspec());
1746 } else {
1747 lea(rscratch, dst);
1748 jmp(rscratch);
1749 }
1750 }
1751
1752 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
1753 assert(rscratch != noreg || always_reachable(dst), "missing");
1754 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
1755 if (reachable(dst)) {
1756 InstructionMark im(this);
1757 relocate(dst.reloc());
1758 const int short_size = 2;
1759 const int long_size = 6;
1760 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
1761 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
1762 // 0111 tttn #8-bit disp
1763 emit_int8(0x70 | cc);
1764 emit_int8((offs - short_size) & 0xFF);
1765 } else {
1766 // 0000 1111 1000 tttn #32-bit disp
1767 emit_int8(0x0F);
1768 emit_int8((unsigned char)(0x80 | cc));
1769 emit_int32(offs - long_size);
1770 }
1771 } else {
1772 #ifdef ASSERT
1773 warning("reversing conditional branch");
1774 #endif /* ASSERT */
1775 Label skip;
1776 jccb(reverse[cc], skip);
1777 lea(rscratch, dst);
1778 Assembler::jmp(rscratch);
1779 bind(skip);
1780 }
1781 }
1782
1783 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
1784 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
1785 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
1786
1787 stmxcsr(mxcsr_save);
1788 movl(tmp, mxcsr_save);
1789 if (EnableX86ECoreOpts) {
1790     // mxcsr_std has its status (exception flag) bits set for performance on E-cores; set them here too so they do not affect the compare
1791 orl(tmp, 0x003f);
1792 } else {
1793 // Mask out status bits (only check control and mask bits)
1794 andl(tmp, 0xFFC0);
1795 }
1796 cmp32(tmp, mxcsr_std, rscratch);
1797 }
1798
1799 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
1800 assert(rscratch != noreg || always_reachable(src), "missing");
1801
1802 if (reachable(src)) {
1803 Assembler::ldmxcsr(as_Address(src));
1804 } else {
1805 lea(rscratch, src);
1806 Assembler::ldmxcsr(Address(rscratch, 0));
1807 }
1808 }
1809
1810 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1811 int off = offset();
1812 movsbl(dst, src); // movsxb
1813 return off;
1814 }
1815
1816 // Note: load_signed_short used to be called load_signed_word.
1817 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
1818 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
1819 // The term "word" in HotSpot means a 32- or 64-bit machine word.
1820 int MacroAssembler::load_signed_short(Register dst, Address src) {
1821   // It would arguably be safe to do a signed 16 => 64 bit extension here, but
1822   // this 32-bit extension is what the 64-bit code has always done, which implies
1823   // that callers only rely on the low 32 bits of the result.
1824 int off = offset();
1825 movswl(dst, src); // movsxw
1826 return off;
1827 }
1828
1829 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
1830   // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
1831   // and "3.9 Partial Register Penalties", p. 22.
1832 int off = offset();
1833 movzbl(dst, src); // movzxb
1834 return off;
1835 }
1836
1837 // Note: load_unsigned_short used to be called load_unsigned_word.
1838 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1839   // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
1840   // and "3.9 Partial Register Penalties", p. 22.
1841 int off = offset();
1842 movzwl(dst, src); // movzxw
1843 return off;
1844 }
1845
1846 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1847 switch (size_in_bytes) {
1848 case 8: movq(dst, src); break;
1849 case 4: movl(dst, src); break;
1850 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1851 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1852 default: ShouldNotReachHere();
1853 }
1854 }
1855
1856 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1857 switch (size_in_bytes) {
1858 case 8: movq(dst, src); break;
1859 case 4: movl(dst, src); break;
1860 case 2: movw(dst, src); break;
1861 case 1: movb(dst, src); break;
1862 default: ShouldNotReachHere();
1863 }
1864 }
1865
1866 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
1867 assert(rscratch != noreg || always_reachable(dst), "missing");
1868
1869 if (reachable(dst)) {
1870 movl(as_Address(dst), src);
1871 } else {
1872 lea(rscratch, dst);
1873 movl(Address(rscratch, 0), src);
1874 }
1875 }
1876
1877 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
1878 if (reachable(src)) {
1879 movl(dst, as_Address(src));
1880 } else {
1881 lea(dst, src);
1882 movl(dst, Address(dst, 0));
1883 }
1884 }
1885
1886 // C++ bool manipulation
1887
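// sizeof(bool) is implementation-defined, so the access width is chosen at compile time.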
1888 void MacroAssembler::movbool(Register dst, Address src) {
1889 if(sizeof(bool) == 1)
1890 movb(dst, src);
1891 else if(sizeof(bool) == 2)
1892 movw(dst, src);
1893 else if(sizeof(bool) == 4)
1894 movl(dst, src);
1895 else
1896 // unsupported
1897 ShouldNotReachHere();
1898 }
1899
1900 void MacroAssembler::movbool(Address dst, bool boolconst) {
1901 if(sizeof(bool) == 1)
1902 movb(dst, (int) boolconst);
1903 else if(sizeof(bool) == 2)
1904 movw(dst, (int) boolconst);
1905 else if(sizeof(bool) == 4)
1906 movl(dst, (int) boolconst);
1907 else
1908 // unsupported
1909 ShouldNotReachHere();
1910 }
1911
1912 void MacroAssembler::movbool(Address dst, Register src) {
1913 if(sizeof(bool) == 1)
1914 movb(dst, src);
1915 else if(sizeof(bool) == 2)
1916 movw(dst, src);
1917 else if(sizeof(bool) == 4)
1918 movl(dst, src);
1919 else
1920 // unsupported
1921 ShouldNotReachHere();
1922 }
1923
1924 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1925 assert(rscratch != noreg || always_reachable(src), "missing");
1926
1927 if (reachable(src)) {
1928 movdl(dst, as_Address(src));
1929 } else {
1930 lea(rscratch, src);
1931 movdl(dst, Address(rscratch, 0));
1932 }
1933 }
1934
1935 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
1936 assert(rscratch != noreg || always_reachable(src), "missing");
1937
1938 if (reachable(src)) {
1939 movq(dst, as_Address(src));
1940 } else {
1941 lea(rscratch, src);
1942 movq(dst, Address(rscratch, 0));
1943 }
1944 }
1945
1946 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1947 assert(rscratch != noreg || always_reachable(src), "missing");
1948
1949 if (reachable(src)) {
1950 if (UseXmmLoadAndClearUpper) {
1951 movsd (dst, as_Address(src));
1952 } else {
1953 movlpd(dst, as_Address(src));
1954 }
1955 } else {
1956 lea(rscratch, src);
1957 if (UseXmmLoadAndClearUpper) {
1958 movsd (dst, Address(rscratch, 0));
1959 } else {
1960 movlpd(dst, Address(rscratch, 0));
1961 }
1962 }
1963 }
1964
1965 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
1966 assert(rscratch != noreg || always_reachable(src), "missing");
1967
1968 if (reachable(src)) {
1969 movss(dst, as_Address(src));
1970 } else {
1971 lea(rscratch, src);
1972 movss(dst, Address(rscratch, 0));
1973 }
1974 }
1975
1976 void MacroAssembler::movptr(Register dst, Register src) {
1977 movq(dst, src);
1978 }
1979
1980 void MacroAssembler::movptr(Register dst, Address src) {
1981 movq(dst, src);
1982 }
1983
1984 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
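// Prefer the shortest encoding: movl zero-extends a 32-bit unsigned value,
// movq sign-extends a 32-bit signed immediate, otherwise fall back to the
// full 10-byte mov64.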
1985 void MacroAssembler::movptr(Register dst, intptr_t src) {
1986 if (is_uimm32(src)) {
1987 movl(dst, checked_cast<uint32_t>(src));
1988 } else if (is_simm32(src)) {
1989 movq(dst, checked_cast<int32_t>(src));
1990 } else {
1991 mov64(dst, src);
1992 }
1993 }
1994
1995 void MacroAssembler::movptr(Address dst, Register src) {
1996 movq(dst, src);
1997 }
1998
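// Store the 32-bit immediate sign-extended to 64 bits.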
1999 void MacroAssembler::movptr(Address dst, int32_t src) {
2000 movslq(dst, src);
2001 }
2002
2003 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
2004 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2005 Assembler::movdqu(dst, src);
2006 }
2007
2008 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
2009 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2010 Assembler::movdqu(dst, src);
2011 }
2012
2013 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
2014 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2015 Assembler::movdqu(dst, src);
2016 }
2017
2018 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2019 assert(rscratch != noreg || always_reachable(src), "missing");
2020
2021 if (reachable(src)) {
2022 movdqu(dst, as_Address(src));
2023 } else {
2024 lea(rscratch, src);
2025 movdqu(dst, Address(rscratch, 0));
2026 }
2027 }
2028
2029 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
2030 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2031 Assembler::vmovdqu(dst, src);
2032 }
2033
2034 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
2035 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2036 Assembler::vmovdqu(dst, src);
2037 }
2038
2039 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
2040 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2041 Assembler::vmovdqu(dst, src);
2042 }
2043
2044 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2045 assert(rscratch != noreg || always_reachable(src), "missing");
2046
2047 if (reachable(src)) {
2048 vmovdqu(dst, as_Address(src));
2049   } else {
2051 lea(rscratch, src);
2052 vmovdqu(dst, Address(rscratch, 0));
2053 }
2054 }
2055
2056 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2057 assert(rscratch != noreg || always_reachable(src), "missing");
2058
2059 if (vector_len == AVX_512bit) {
2060 evmovdquq(dst, src, AVX_512bit, rscratch);
2061 } else if (vector_len == AVX_256bit) {
2062 vmovdqu(dst, src, rscratch);
2063 } else {
2064 movdqu(dst, src, rscratch);
2065 }
2066 }
2067
2068 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
2069 if (vector_len == AVX_512bit) {
2070 evmovdquq(dst, src, AVX_512bit);
2071 } else if (vector_len == AVX_256bit) {
2072 vmovdqu(dst, src);
2073 } else {
2074 movdqu(dst, src);
2075 }
2076 }
2077
2078 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
2079 if (vector_len == AVX_512bit) {
2080 evmovdquq(dst, src, AVX_512bit);
2081 } else if (vector_len == AVX_256bit) {
2082 vmovdqu(dst, src);
2083 } else {
2084 movdqu(dst, src);
2085 }
2086 }
2087
2088 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
2089 if (vector_len == AVX_512bit) {
2090 evmovdquq(dst, src, AVX_512bit);
2091 } else if (vector_len == AVX_256bit) {
2092 vmovdqu(dst, src);
2093 } else {
2094 movdqu(dst, src);
2095 }
2096 }
2097
2098 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2099 assert(rscratch != noreg || always_reachable(src), "missing");
2100
2101 if (reachable(src)) {
2102 vmovdqa(dst, as_Address(src));
2103   } else {
2105 lea(rscratch, src);
2106 vmovdqa(dst, Address(rscratch, 0));
2107 }
2108 }
2109
2110 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2111 assert(rscratch != noreg || always_reachable(src), "missing");
2112
2113 if (vector_len == AVX_512bit) {
2114 evmovdqaq(dst, src, AVX_512bit, rscratch);
2115 } else if (vector_len == AVX_256bit) {
2116 vmovdqa(dst, src, rscratch);
2117 } else {
2118 movdqa(dst, src, rscratch);
2119 }
2120 }
2121
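// With AVX512BW the opmask registers can hold 64-bit masks (kmovq); otherwise
// only the 16-bit mask moves (kmovw) of base AVX-512 are available.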
2122 void MacroAssembler::kmov(KRegister dst, Address src) {
2123 if (VM_Version::supports_avx512bw()) {
2124 kmovql(dst, src);
2125 } else {
2126 assert(VM_Version::supports_evex(), "");
2127 kmovwl(dst, src);
2128 }
2129 }
2130
2131 void MacroAssembler::kmov(Address dst, KRegister src) {
2132 if (VM_Version::supports_avx512bw()) {
2133 kmovql(dst, src);
2134 } else {
2135 assert(VM_Version::supports_evex(), "");
2136 kmovwl(dst, src);
2137 }
2138 }
2139
2140 void MacroAssembler::kmov(KRegister dst, KRegister src) {
2141 if (VM_Version::supports_avx512bw()) {
2142 kmovql(dst, src);
2143 } else {
2144 assert(VM_Version::supports_evex(), "");
2145 kmovwl(dst, src);
2146 }
2147 }
2148
2149 void MacroAssembler::kmov(Register dst, KRegister src) {
2150 if (VM_Version::supports_avx512bw()) {
2151 kmovql(dst, src);
2152 } else {
2153 assert(VM_Version::supports_evex(), "");
2154 kmovwl(dst, src);
2155 }
2156 }
2157
2158 void MacroAssembler::kmov(KRegister dst, Register src) {
2159 if (VM_Version::supports_avx512bw()) {
2160 kmovql(dst, src);
2161 } else {
2162 assert(VM_Version::supports_evex(), "");
2163 kmovwl(dst, src);
2164 }
2165 }
2166
2167 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
2168 assert(rscratch != noreg || always_reachable(src), "missing");
2169
2170 if (reachable(src)) {
2171 kmovql(dst, as_Address(src));
2172 } else {
2173 lea(rscratch, src);
2174 kmovql(dst, Address(rscratch, 0));
2175 }
2176 }
2177
2178 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
2179 assert(rscratch != noreg || always_reachable(src), "missing");
2180
2181 if (reachable(src)) {
2182 kmovwl(dst, as_Address(src));
2183 } else {
2184 lea(rscratch, src);
2185 kmovwl(dst, Address(rscratch, 0));
2186 }
2187 }
2188
2189 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2190 int vector_len, Register rscratch) {
2191 assert(rscratch != noreg || always_reachable(src), "missing");
2192
2193 if (reachable(src)) {
2194 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
2195 } else {
2196 lea(rscratch, src);
2197 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
2198 }
2199 }
2200
2201 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2202 int vector_len, Register rscratch) {
2203 assert(rscratch != noreg || always_reachable(src), "missing");
2204
2205 if (reachable(src)) {
2206 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
2207 } else {
2208 lea(rscratch, src);
2209 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
2210 }
2211 }
2212
2213 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2214 assert(rscratch != noreg || always_reachable(src), "missing");
2215
2216 if (reachable(src)) {
2217 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
2218 } else {
2219 lea(rscratch, src);
2220 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
2221 }
2222 }
2223
2224 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2225 assert(rscratch != noreg || always_reachable(src), "missing");
2226
2227 if (reachable(src)) {
2228 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
2229 } else {
2230 lea(rscratch, src);
2231 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
2232 }
2233 }
2234
2235 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2236 assert(rscratch != noreg || always_reachable(src), "missing");
2237
2238 if (reachable(src)) {
2239 Assembler::evmovdquq(dst, as_Address(src), vector_len);
2240 } else {
2241 lea(rscratch, src);
2242 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
2243 }
2244 }
2245
2246 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2247 assert(rscratch != noreg || always_reachable(src), "missing");
2248
2249 if (reachable(src)) {
2250 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
2251 } else {
2252 lea(rscratch, src);
2253 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
2254 }
2255 }
2256
2257 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2258 assert(rscratch != noreg || always_reachable(src), "missing");
2259
2260 if (reachable(src)) {
2261 Assembler::evmovdqaq(dst, as_Address(src), vector_len);
2262 } else {
2263 lea(rscratch, src);
2264 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
2265 }
2266 }
2267
2268 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2269 assert(rscratch != noreg || always_reachable(src), "missing");
2270
2271 if (reachable(src)) {
2272 Assembler::movapd(dst, as_Address(src));
2273 } else {
2274 lea(rscratch, src);
2275 Assembler::movapd(dst, Address(rscratch, 0));
2276 }
2277 }
2278
2279 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2280 assert(rscratch != noreg || always_reachable(src), "missing");
2281
2282 if (reachable(src)) {
2283 Assembler::movdqa(dst, as_Address(src));
2284 } else {
2285 lea(rscratch, src);
2286 Assembler::movdqa(dst, Address(rscratch, 0));
2287 }
2288 }
2289
2290 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2291 assert(rscratch != noreg || always_reachable(src), "missing");
2292
2293 if (reachable(src)) {
2294 Assembler::movsd(dst, as_Address(src));
2295 } else {
2296 lea(rscratch, src);
2297 Assembler::movsd(dst, Address(rscratch, 0));
2298 }
2299 }
2300
2301 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2302 assert(rscratch != noreg || always_reachable(src), "missing");
2303
2304 if (reachable(src)) {
2305 Assembler::movss(dst, as_Address(src));
2306 } else {
2307 lea(rscratch, src);
2308 Assembler::movss(dst, Address(rscratch, 0));
2309 }
2310 }
2311
2312 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
2313 assert(rscratch != noreg || always_reachable(src), "missing");
2314
2315 if (reachable(src)) {
2316 Assembler::movddup(dst, as_Address(src));
2317 } else {
2318 lea(rscratch, src);
2319 Assembler::movddup(dst, Address(rscratch, 0));
2320 }
2321 }
2322
2323 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2324 assert(rscratch != noreg || always_reachable(src), "missing");
2325
2326 if (reachable(src)) {
2327 Assembler::vmovddup(dst, as_Address(src), vector_len);
2328 } else {
2329 lea(rscratch, src);
2330 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
2331 }
2332 }
2333
2334 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2335 assert(rscratch != noreg || always_reachable(src), "missing");
2336
2337 if (reachable(src)) {
2338 Assembler::mulsd(dst, as_Address(src));
2339 } else {
2340 lea(rscratch, src);
2341 Assembler::mulsd(dst, Address(rscratch, 0));
2342 }
2343 }
2344
2345 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2346 assert(rscratch != noreg || always_reachable(src), "missing");
2347
2348 if (reachable(src)) {
2349 Assembler::mulss(dst, as_Address(src));
2350 } else {
2351 lea(rscratch, src);
2352 Assembler::mulss(dst, Address(rscratch, 0));
2353 }
2354 }
2355
2356 void MacroAssembler::null_check(Register reg, int offset) {
2357 if (needs_explicit_null_check(offset)) {
2358 // provoke OS null exception if reg is null by
2359 // accessing M[reg] w/o changing any (non-CC) registers
2360 // NOTE: cmpl is plenty here to provoke a segv
2361 cmpptr(rax, Address(reg, 0));
2362 // Note: should probably use testl(rax, Address(reg, 0));
2363 // may be shorter code (however, this version of
2364 // testl needs to be implemented first)
2365 } else {
2366 // nothing to do, (later) access of M[reg + offset]
2367 // will provoke OS null exception if reg is null
2368 }
2369 }
2370
2371 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2372 andptr(markword, markWord::inline_type_mask_in_place);
2373 cmpptr(markword, markWord::inline_type_pattern);
2374 jcc(Assembler::equal, is_inline_type);
2375 }
2376
2377 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
2378 if (can_be_null) {
2379 testptr(object, object);
2380 jcc(Assembler::zero, not_inline_type);
2381 }
2382 const int is_inline_type_mask = markWord::inline_type_pattern;
2383 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2384 andptr(tmp, is_inline_type_mask);
2385 cmpptr(tmp, is_inline_type_mask);
2386 jcc(Assembler::notEqual, not_inline_type);
2387 }
2388
2389 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2390 movl(temp_reg, flags);
2391 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2392 jcc(Assembler::notEqual, is_null_free_inline_type);
2393 }
2394
2395 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2396 movl(temp_reg, flags);
2397 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2398 jcc(Assembler::equal, not_null_free_inline_type);
2399 }
2400
2401 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2402 movl(temp_reg, flags);
2403 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
2404 jcc(Assembler::notEqual, is_flat);
2405 }
2406
2407 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2408 movl(temp_reg, flags);
2409 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
2410 jcc(Assembler::notEqual, has_null_marker);
2411 }
2412
2413 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2414 Label test_mark_word;
2415 // load mark word
2416 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
2417   // if the mark word is unlocked, it already holds the prototype bits
2418 testl(temp_reg, markWord::unlocked_value);
2419 jccb(Assembler::notZero, test_mark_word);
2420   // slow path: the header may be displaced, so load the klass prototype header instead
2421 push(rscratch1);
2422 load_prototype_header(temp_reg, oop, rscratch1);
2423 pop(rscratch1);
2424
2425 bind(test_mark_word);
2426 testl(temp_reg, test_bit);
2427 jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
2428 }
2429
2430 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
2431 Label& is_flat_array) {
2432 #ifdef _LP64
2433 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2434 #else
2435 load_klass(temp_reg, oop, noreg);
2436 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2437 test_flat_array_layout(temp_reg, is_flat_array);
2438 #endif
2439 }
2440
2441 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
2442 Label& is_non_flat_array) {
2443 #ifdef _LP64
2444 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2445 #else
2446 load_klass(temp_reg, oop, noreg);
2447 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2448 test_non_flat_array_layout(temp_reg, is_non_flat_array);
2449 #endif
2450 }
2451
2452 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
2453 #ifdef _LP64
2454 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2455 #else
2456 Unimplemented();
2457 #endif
2458 }
2459
2460 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
2461 #ifdef _LP64
2462 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2463 #else
2464 Unimplemented();
2465 #endif
2466 }
2467
2468 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2469 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2470 jcc(Assembler::notZero, is_flat_array);
2471 }
2472
2473 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2474 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2475 jcc(Assembler::zero, is_non_flat_array);
2476 }
2477
2478 void MacroAssembler::os_breakpoint() {
2479   // Instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
2480   // (e.g., MSVC can't call ps() otherwise).
2481 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
2482 }
2483
2484 void MacroAssembler::unimplemented(const char* what) {
2485 const char* buf = nullptr;
2486 {
2487 ResourceMark rm;
2488 stringStream ss;
2489 ss.print("unimplemented: %s", what);
2490 buf = code_string(ss.as_string());
2491 }
2492 stop(buf);
2493 }
2494
2495 #define XSTATE_BV 0x200
2496
2497 void MacroAssembler::pop_CPU_state() {
2498 pop_FPU_state();
2499 pop_IU_state();
2500 }
2501
2502 void MacroAssembler::pop_FPU_state() {
2503 fxrstor(Address(rsp, 0));
2504 addptr(rsp, FPUStateSizeInWords * wordSize);
2505 }
2506
2507 void MacroAssembler::pop_IU_state() {
2508 popa();
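  // drop the alignment slot reserved by push_IU_state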
2509 addq(rsp, 8);
2510 popf();
2511 }
2512
2513 // Save Integer and Float state
2514 // Warning: Stack must be 16 byte aligned (64bit)
2515 void MacroAssembler::push_CPU_state() {
2516 push_IU_state();
2517 push_FPU_state();
2518 }
2519
2520 void MacroAssembler::push_FPU_state() {
2521 subptr(rsp, FPUStateSizeInWords * wordSize);
2522 fxsave(Address(rsp, 0));
2523 }
2524
2525 void MacroAssembler::push_IU_state() {
2526 // Push flags first because pusha kills them
2527 pushf();
2528 // Make sure rsp stays 16-byte aligned
2529 subq(rsp, 8);
2530 pusha();
2531 }
2532
2533 void MacroAssembler::push_cont_fastpath() {
2534 if (!Continuations::enabled()) return;
2535
2536 Label L_done;
2537 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2538 jccb(Assembler::belowEqual, L_done);
2539 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
2540 bind(L_done);
2541 }
2542
2543 void MacroAssembler::pop_cont_fastpath() {
2544 if (!Continuations::enabled()) return;
2545
2546 Label L_done;
2547 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2548 jccb(Assembler::below, L_done);
2549 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
2550 bind(L_done);
2551 }
2552
2553 #ifdef ASSERT
2554 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
2555 Label no_cont;
2556 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
2557 testl(cont, cont);
2558 jcc(Assembler::zero, no_cont);
2559 stop(name);
2560 bind(no_cont);
2561 }
2562 #endif
2563
2564 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
2565 // we must set sp to zero to clear frame
2566 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
2567 // must clear fp, so that compiled frames are not confused; it is
2568 // possible that we need it only for debugging
2569 if (clear_fp) {
2570 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
2571 }
2572 // Always clear the pc because it could have been set by make_walkable()
2573 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
2574 vzeroupper();
2575 }
2576
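// Round reg up to the next multiple of modulus (assumes modulus is a power of two).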
2577 void MacroAssembler::round_to(Register reg, int modulus) {
2578 addptr(reg, modulus - 1);
2579 andptr(reg, -modulus);
2580 }
2581
2582 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
2583 if (at_return) {
2584 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
2585 // we may safely use rsp instead to perform the stack watermark check.
2586 cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
2587 jcc(Assembler::above, slow_path);
2588 return;
2589 }
2590 testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2591 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
2592 }
2593
2594 // Calls to C land
2595 //
2596 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
2597 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
2598 // has to be reset to 0. This is required to allow proper stack traversal.
2599 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2600 Register last_java_fp,
2601 address last_java_pc,
2602 Register rscratch) {
2603 vzeroupper();
2604 // determine last_java_sp register
2605 if (!last_java_sp->is_valid()) {
2606 last_java_sp = rsp;
2607 }
2608 // last_java_fp is optional
2609 if (last_java_fp->is_valid()) {
2610 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
2611 }
2612 // last_java_pc is optional
2613 if (last_java_pc != nullptr) {
2614 Address java_pc(r15_thread,
2615 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
2616 lea(java_pc, InternalAddress(last_java_pc), rscratch);
2617 }
2618 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
2619 }
2620
2621 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2622 Register last_java_fp,
2623 Label &L,
2624 Register scratch) {
2625 lea(scratch, L);
2626 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
2627 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
2628 }
2629
2630 void MacroAssembler::shlptr(Register dst, int imm8) {
2631 shlq(dst, imm8);
2632 }
2633
2634 void MacroAssembler::shrptr(Register dst, int imm8) {
2635 shrq(dst, imm8);
2636 }
2637
2638 void MacroAssembler::sign_extend_byte(Register reg) {
2639 movsbl(reg, reg); // movsxb
2640 }
2641
2642 void MacroAssembler::sign_extend_short(Register reg) {
2643 movswl(reg, reg); // movsxw
2644 }
2645
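// When the immediate is non-negative and fits in 8 bits, a byte-sized test is
// equivalent (the result can have no bits set above bit 7) and encodes shorter.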
2646 void MacroAssembler::testl(Address dst, int32_t imm32) {
2647 if (imm32 >= 0 && is8bit(imm32)) {
2648 testb(dst, imm32);
2649 } else {
2650 Assembler::testl(dst, imm32);
2651 }
2652 }
2653
2654 void MacroAssembler::testl(Register dst, int32_t imm32) {
2655 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
2656 testb(dst, imm32);
2657 } else {
2658 Assembler::testl(dst, imm32);
2659 }
2660 }
2661
2662 void MacroAssembler::testl(Register dst, AddressLiteral src) {
2663 assert(always_reachable(src), "Address should be reachable");
2664 testl(dst, as_Address(src));
2665 }
2666
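// A non-negative imm32 sign-extends to a zero upper half, so the flags depend
// only on the low 32 bits and the shorter testl (no REX.W) can be used.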
2667 void MacroAssembler::testq(Address dst, int32_t imm32) {
2668 if (imm32 >= 0) {
2669 testl(dst, imm32);
2670 } else {
2671 Assembler::testq(dst, imm32);
2672 }
2673 }
2674
2675 void MacroAssembler::testq(Register dst, int32_t imm32) {
2676 if (imm32 >= 0) {
2677 testl(dst, imm32);
2678 } else {
2679 Assembler::testq(dst, imm32);
2680 }
2681 }
2682
2683 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
2684 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2685 Assembler::pcmpeqb(dst, src);
2686 }
2687
2688 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
2689 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2690 Assembler::pcmpeqw(dst, src);
2691 }
2692
2693 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
2694 assert((dst->encoding() < 16),"XMM register should be 0-15");
2695 Assembler::pcmpestri(dst, src, imm8);
2696 }
2697
2698 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
2699 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2700 Assembler::pcmpestri(dst, src, imm8);
2701 }
2702
2703 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2704 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2705 Assembler::pmovzxbw(dst, src);
2706 }
2707
2708 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
2709 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2710 Assembler::pmovzxbw(dst, src);
2711 }
2712
2713 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
2714 assert((src->encoding() < 16),"XMM register should be 0-15");
2715 Assembler::pmovmskb(dst, src);
2716 }
2717
2718 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
2719 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2720 Assembler::ptest(dst, src);
2721 }
2722
2723 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2724 assert(rscratch != noreg || always_reachable(src), "missing");
2725
2726 if (reachable(src)) {
2727 Assembler::sqrtss(dst, as_Address(src));
2728 } else {
2729 lea(rscratch, src);
2730 Assembler::sqrtss(dst, Address(rscratch, 0));
2731 }
2732 }
2733
2734 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2735 assert(rscratch != noreg || always_reachable(src), "missing");
2736
2737 if (reachable(src)) {
2738 Assembler::subsd(dst, as_Address(src));
2739 } else {
2740 lea(rscratch, src);
2741 Assembler::subsd(dst, Address(rscratch, 0));
2742 }
2743 }
2744
2745 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
2746 assert(rscratch != noreg || always_reachable(src), "missing");
2747
2748 if (reachable(src)) {
2749 Assembler::roundsd(dst, as_Address(src), rmode);
2750 } else {
2751 lea(rscratch, src);
2752 Assembler::roundsd(dst, Address(rscratch, 0), rmode);
2753 }
2754 }
2755
2756 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2757 assert(rscratch != noreg || always_reachable(src), "missing");
2758
2759 if (reachable(src)) {
2760 Assembler::subss(dst, as_Address(src));
2761 } else {
2762 lea(rscratch, src);
2763 Assembler::subss(dst, Address(rscratch, 0));
2764 }
2765 }
2766
2767 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2768 assert(rscratch != noreg || always_reachable(src), "missing");
2769
2770 if (reachable(src)) {
2771 Assembler::ucomisd(dst, as_Address(src));
2772 } else {
2773 lea(rscratch, src);
2774 Assembler::ucomisd(dst, Address(rscratch, 0));
2775 }
2776 }
2777
2778 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2779 assert(rscratch != noreg || always_reachable(src), "missing");
2780
2781 if (reachable(src)) {
2782 Assembler::ucomiss(dst, as_Address(src));
2783 } else {
2784 lea(rscratch, src);
2785 Assembler::ucomiss(dst, Address(rscratch, 0));
2786 }
2787 }
2788
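// xmm16-31 can only be encoded with EVEX, and the EVEX forms of xorpd/xorps
// require AVX512DQ (plus AVX512VL below 512 bits). When those are missing,
// fall back to the bitwise-equivalent integer vpxor at 512-bit width, which
// only needs AVX512F.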
2789 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2790 assert(rscratch != noreg || always_reachable(src), "missing");
2791
2792 // Used in sign-bit flipping with aligned address.
2793 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2794
2795 if (UseAVX > 2 &&
2796 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2797 (dst->encoding() >= 16)) {
2798 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2799 } else if (reachable(src)) {
2800 Assembler::xorpd(dst, as_Address(src));
2801 } else {
2802 lea(rscratch, src);
2803 Assembler::xorpd(dst, Address(rscratch, 0));
2804 }
2805 }
2806
2807 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
2808 if (UseAVX > 2 &&
2809 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2810 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2811 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2812 } else {
2813 Assembler::xorpd(dst, src);
2814 }
2815 }
2816
2817 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
2818 if (UseAVX > 2 &&
2819 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2820 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2821 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2822 } else {
2823 Assembler::xorps(dst, src);
2824 }
2825 }
2826
2827 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
2828 assert(rscratch != noreg || always_reachable(src), "missing");
2829
2830 // Used in sign-bit flipping with aligned address.
2831 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2832
2833 if (UseAVX > 2 &&
2834 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2835 (dst->encoding() >= 16)) {
2836 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2837 } else if (reachable(src)) {
2838 Assembler::xorps(dst, as_Address(src));
2839 } else {
2840 lea(rscratch, src);
2841 Assembler::xorps(dst, Address(rscratch, 0));
2842 }
2843 }
2844
2845 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
2846 assert(rscratch != noreg || always_reachable(src), "missing");
2847
2848 // Used in sign-bit flipping with aligned address.
2849 bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
2850 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
2851 if (reachable(src)) {
2852 Assembler::pshufb(dst, as_Address(src));
2853 } else {
2854 lea(rscratch, src);
2855 Assembler::pshufb(dst, Address(rscratch, 0));
2856 }
2857 }
2858
2859 // AVX 3-operands instructions
2860
2861 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2862 assert(rscratch != noreg || always_reachable(src), "missing");
2863
2864 if (reachable(src)) {
2865 vaddsd(dst, nds, as_Address(src));
2866 } else {
2867 lea(rscratch, src);
2868 vaddsd(dst, nds, Address(rscratch, 0));
2869 }
2870 }
2871
2872 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2873 assert(rscratch != noreg || always_reachable(src), "missing");
2874
2875 if (reachable(src)) {
2876 vaddss(dst, nds, as_Address(src));
2877 } else {
2878 lea(rscratch, src);
2879 vaddss(dst, nds, Address(rscratch, 0));
2880 }
2881 }
2882
2883 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2884 assert(UseAVX > 0, "requires some form of AVX");
2885 assert(rscratch != noreg || always_reachable(src), "missing");
2886
2887 if (reachable(src)) {
2888 Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
2889 } else {
2890 lea(rscratch, src);
2891 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
2892 }
2893 }
2894
2895 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2896 assert(UseAVX > 0, "requires some form of AVX");
2897 assert(rscratch != noreg || always_reachable(src), "missing");
2898
2899 if (reachable(src)) {
2900 Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
2901 } else {
2902 lea(rscratch, src);
2903 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
2904 }
2905 }
2906
2907 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2908 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2909 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2910
2911 vandps(dst, nds, negate_field, vector_len, rscratch);
2912 }
2913
2914 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2915 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2916 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2917
2918 vandpd(dst, nds, negate_field, vector_len, rscratch);
2919 }
2920
2921 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
2922 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2923 Assembler::vpaddb(dst, nds, src, vector_len);
2924 }
2925
2926 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
2927 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2928 Assembler::vpaddb(dst, nds, src, vector_len);
2929 }
2930
2931 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
2932 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2933 Assembler::vpaddw(dst, nds, src, vector_len);
2934 }
2935
2936 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
2937 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2938 Assembler::vpaddw(dst, nds, src, vector_len);
2939 }
2940
2941 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2942 assert(rscratch != noreg || always_reachable(src), "missing");
2943
2944 if (reachable(src)) {
2945 Assembler::vpand(dst, nds, as_Address(src), vector_len);
2946 } else {
2947 lea(rscratch, src);
2948 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
2949 }
2950 }
2951
2952 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2953 assert(rscratch != noreg || always_reachable(src), "missing");
2954
2955 if (reachable(src)) {
2956 Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
2957 } else {
2958 lea(rscratch, src);
2959 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
2960 }
2961 }
2962
2963 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2964 assert(rscratch != noreg || always_reachable(src), "missing");
2965
2966 if (reachable(src)) {
2967 Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
2968 } else {
2969 lea(rscratch, src);
2970 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
2971 }
2972 }
2973
2974 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2975 assert(rscratch != noreg || always_reachable(src), "missing");
2976
2977 if (reachable(src)) {
2978 Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
2979 } else {
2980 lea(rscratch, src);
2981 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
2982 }
2983 }
2984
2985 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2986 assert(rscratch != noreg || always_reachable(src), "missing");
2987
2988 if (reachable(src)) {
2989 Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
2990 } else {
2991 lea(rscratch, src);
2992 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
2993 }
2994 }
2995
2996 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2997 assert(rscratch != noreg || always_reachable(src), "missing");
2998
2999 if (reachable(src)) {
3000 Assembler::vbroadcastss(dst, as_Address(src), vector_len);
3001 } else {
3002 lea(rscratch, src);
3003 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
3004 }
3005 }
3006
3007 // Vector float blend
3008 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
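// When blend emulation applies (EnableX86ECoreOpts), the blend is computed as
//   dst = (mask & src2) | (~mask & src1)
// after widening each mask lane to all-ones/all-zeros from its sign bit.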
3009 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3010 // WARN: Allow dst == (src1|src2), mask == scratch
3011 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3012 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3013 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
3014 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3015 if (blend_emulation && scratch_available && dst_available) {
3016 if (compute_mask) {
3017 vpsrad(scratch, mask, 32, vector_len);
3018 mask = scratch;
3019 }
3020 if (dst == src1) {
3021 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3022 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3023 } else {
3024 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3025 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3026 }
3027 vpor(dst, dst, scratch, vector_len);
3028 } else {
3029 Assembler::vblendvps(dst, src1, src2, mask, vector_len);
3030 }
3031 }
3032
3033 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3034 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3035 // WARN: Allow dst == (src1|src2), mask == scratch
3036 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3037 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3038 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
3039 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3040 if (blend_emulation && scratch_available && dst_available) {
3041 if (compute_mask) {
3042 vpxor(scratch, scratch, scratch, vector_len);
3043 vpcmpgtq(scratch, scratch, mask, vector_len);
3044 mask = scratch;
3045 }
3046 if (dst == src1) {
3047       vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3048 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3049 } else {
3050 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3051       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3052 }
3053 vpor(dst, dst, scratch, vector_len);
3054 } else {
3055 Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
3056 }
3057 }
3058
3059 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3060 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3061 Assembler::vpcmpeqb(dst, nds, src, vector_len);
3062 }
3063
3064 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
3065 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3066 Assembler::vpcmpeqb(dst, src1, src2, vector_len);
3067 }
3068
3069 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3070 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3071 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3072 }
3073
3074 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3075 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3076 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3077 }
3078
3079 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3080 assert(rscratch != noreg || always_reachable(src), "missing");
3081
3082 if (reachable(src)) {
3083 Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
3084 } else {
3085 lea(rscratch, src);
3086 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
3087 }
3088 }
3089
3090 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3091 int comparison, bool is_signed, int vector_len, Register rscratch) {
3092 assert(rscratch != noreg || always_reachable(src), "missing");
3093
3094 if (reachable(src)) {
3095 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3096 } else {
3097 lea(rscratch, src);
3098 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3099 }
3100 }
3101
3102 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3103 int comparison, bool is_signed, int vector_len, Register rscratch) {
3104 assert(rscratch != noreg || always_reachable(src), "missing");
3105
3106 if (reachable(src)) {
3107 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3108 } else {
3109 lea(rscratch, src);
3110 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3111 }
3112 }
3113
3114 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3115 int comparison, bool is_signed, int vector_len, Register rscratch) {
3116 assert(rscratch != noreg || always_reachable(src), "missing");
3117
3118 if (reachable(src)) {
3119 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3120 } else {
3121 lea(rscratch, src);
3122 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3123 }
3124 }
3125
3126 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3127 int comparison, bool is_signed, int vector_len, Register rscratch) {
3128 assert(rscratch != noreg || always_reachable(src), "missing");
3129
3130 if (reachable(src)) {
3131 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3132 } else {
3133 lea(rscratch, src);
3134 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3135 }
3136 }
3137
3138 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
3139 if (width == Assembler::Q) {
3140 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
3141 } else {
3142 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
3143 }
3144 }
3145
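// AVX/AVX2 integer compares only provide "equal" and "greater-than" directly;
// the remaining predicates are synthesized by swapping operands and/or
// inverting the result with an all-ones vector.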
3146 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
3147 int eq_cond_enc = 0x29;
3148 int gt_cond_enc = 0x37;
3149 if (width != Assembler::Q) {
3150 eq_cond_enc = 0x74 + width;
3151 gt_cond_enc = 0x64 + width;
3152 }
3153 switch (cond) {
3154 case eq:
3155 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3156 break;
3157 case neq:
3158 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3159 vallones(xtmp, vector_len);
3160 vpxor(dst, xtmp, dst, vector_len);
3161 break;
3162 case le:
3163 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3164 vallones(xtmp, vector_len);
3165 vpxor(dst, xtmp, dst, vector_len);
3166 break;
3167 case nlt:
3168 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3169 vallones(xtmp, vector_len);
3170 vpxor(dst, xtmp, dst, vector_len);
3171 break;
3172 case lt:
3173 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3174 break;
3175 case nle:
3176 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3177 break;
3178 default:
3179 assert(false, "Should not reach here");
3180 }
3181 }
3182
3183 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
3184 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3185 Assembler::vpmovzxbw(dst, src, vector_len);
3186 }
3187
3188 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
3189 assert((src->encoding() < 16),"XMM register should be 0-15");
3190 Assembler::vpmovmskb(dst, src, vector_len);
3191 }
3192
3193 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3194 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3195 Assembler::vpmullw(dst, nds, src, vector_len);
3196 }
3197
3198 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3199 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3200 Assembler::vpmullw(dst, nds, src, vector_len);
3201 }
3202
3203 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3204 assert((UseAVX > 0), "AVX support is needed");
3205 assert(rscratch != noreg || always_reachable(src), "missing");
3206
3207 if (reachable(src)) {
3208 Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
3209 } else {
3210 lea(rscratch, src);
3211 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
3212 }
3213 }
3214
3215 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3216 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3217 Assembler::vpsubb(dst, nds, src, vector_len);
3218 }
3219
3220 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3221 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3222 Assembler::vpsubb(dst, nds, src, vector_len);
3223 }
3224
3225 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3226 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3227 Assembler::vpsubw(dst, nds, src, vector_len);
3228 }
3229
3230 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3231 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3232 Assembler::vpsubw(dst, nds, src, vector_len);
3233 }
3234
3235 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3236 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3237 Assembler::vpsraw(dst, nds, shift, vector_len);
3238 }
3239
3240 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3241 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3242 Assembler::vpsraw(dst, nds, shift, vector_len);
3243 }
3244
3245 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3246 assert(UseAVX > 2,"");
3247 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3248 vector_len = 2;
3249 }
3250 Assembler::evpsraq(dst, nds, shift, vector_len);
3251 }
3252
3253 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3254 assert(UseAVX > 2,"");
3255 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3256 vector_len = 2;
3257 }
3258 Assembler::evpsraq(dst, nds, shift, vector_len);
3259 }
3260
3261 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3262 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3263 Assembler::vpsrlw(dst, nds, shift, vector_len);
3264 }
3265
3266 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3267 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3268 Assembler::vpsrlw(dst, nds, shift, vector_len);
3269 }
3270
3271 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3272 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3273 Assembler::vpsllw(dst, nds, shift, vector_len);
3274 }
3275
3276 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3277 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3278 Assembler::vpsllw(dst, nds, shift, vector_len);
3279 }
3280
3281 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
3282 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
3283 Assembler::vptest(dst, src);
3284 }
3285
3286 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
3287 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3288 Assembler::punpcklbw(dst, src);
3289 }
3290
3291 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
3292 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
3293 Assembler::pshufd(dst, src, mode);
3294 }
3295
3296 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
3297 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3298 Assembler::pshuflw(dst, src, mode);
3299 }
3300
3301 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3302 assert(rscratch != noreg || always_reachable(src), "missing");
3303
3304 if (reachable(src)) {
3305 vandpd(dst, nds, as_Address(src), vector_len);
3306 } else {
3307 lea(rscratch, src);
3308 vandpd(dst, nds, Address(rscratch, 0), vector_len);
3309 }
3310 }
3311
3312 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3313 assert(rscratch != noreg || always_reachable(src), "missing");
3314
3315 if (reachable(src)) {
3316 vandps(dst, nds, as_Address(src), vector_len);
3317 } else {
3318 lea(rscratch, src);
3319 vandps(dst, nds, Address(rscratch, 0), vector_len);
3320 }
3321 }
3322
3323 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
3324 bool merge, int vector_len, Register rscratch) {
3325 assert(rscratch != noreg || always_reachable(src), "missing");
3326
3327 if (reachable(src)) {
3328 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
3329 } else {
3330 lea(rscratch, src);
3331 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
3332 }
3333 }
3334
3335 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3336 assert(rscratch != noreg || always_reachable(src), "missing");
3337
3338 if (reachable(src)) {
3339 vdivsd(dst, nds, as_Address(src));
3340 } else {
3341 lea(rscratch, src);
3342 vdivsd(dst, nds, Address(rscratch, 0));
3343 }
3344 }
3345
3346 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3347 assert(rscratch != noreg || always_reachable(src), "missing");
3348
3349 if (reachable(src)) {
3350 vdivss(dst, nds, as_Address(src));
3351 } else {
3352 lea(rscratch, src);
3353 vdivss(dst, nds, Address(rscratch, 0));
3354 }
3355 }
3356
3357 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3358 assert(rscratch != noreg || always_reachable(src), "missing");
3359
3360 if (reachable(src)) {
3361 vmulsd(dst, nds, as_Address(src));
3362 } else {
3363 lea(rscratch, src);
3364 vmulsd(dst, nds, Address(rscratch, 0));
3365 }
3366 }
3367
3368 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3369 assert(rscratch != noreg || always_reachable(src), "missing");
3370
3371 if (reachable(src)) {
3372 vmulss(dst, nds, as_Address(src));
3373 } else {
3374 lea(rscratch, src);
3375 vmulss(dst, nds, Address(rscratch, 0));
3376 }
3377 }
3378
3379 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3380 assert(rscratch != noreg || always_reachable(src), "missing");
3381
3382 if (reachable(src)) {
3383 vsubsd(dst, nds, as_Address(src));
3384 } else {
3385 lea(rscratch, src);
3386 vsubsd(dst, nds, Address(rscratch, 0));
3387 }
3388 }
3389
3390 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3391 assert(rscratch != noreg || always_reachable(src), "missing");
3392
3393 if (reachable(src)) {
3394 vsubss(dst, nds, as_Address(src));
3395 } else {
3396 lea(rscratch, src);
3397 vsubss(dst, nds, Address(rscratch, 0));
3398 }
3399 }
3400
3401 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3402 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3403 assert(rscratch != noreg || always_reachable(src), "missing");
3404
3405 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
3406 }
3407
3408 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3409 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3410 assert(rscratch != noreg || always_reachable(src), "missing");
3411
3412 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
3413 }
3414
3415 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3416 assert(rscratch != noreg || always_reachable(src), "missing");
3417
3418 if (reachable(src)) {
3419 vxorpd(dst, nds, as_Address(src), vector_len);
3420 } else {
3421 lea(rscratch, src);
3422 vxorpd(dst, nds, Address(rscratch, 0), vector_len);
3423 }
3424 }
3425
3426 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3427 assert(rscratch != noreg || always_reachable(src), "missing");
3428
3429 if (reachable(src)) {
3430 vxorps(dst, nds, as_Address(src), vector_len);
3431 } else {
3432 lea(rscratch, src);
3433 vxorps(dst, nds, Address(rscratch, 0), vector_len);
3434 }
3435 }
3436
3437 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3438 assert(rscratch != noreg || always_reachable(src), "missing");
3439
3440 if (UseAVX > 1 || (vector_len < 1)) {
3441 if (reachable(src)) {
3442 Assembler::vpxor(dst, nds, as_Address(src), vector_len);
3443 } else {
3444 lea(rscratch, src);
3445 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
3446 }
3447 } else {
3448 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
3449 }
3450 }
3451
3452 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3453 assert(rscratch != noreg || always_reachable(src), "missing");
3454
3455 if (reachable(src)) {
3456 Assembler::vpermd(dst, nds, as_Address(src), vector_len);
3457 } else {
3458 lea(rscratch, src);
3459 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
3460 }
3461 }
3462
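// Strip the JNI handle tag bits from a (possibly non-local) jobject so the
// remaining value can be used as a raw handle address.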
3463 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
3464 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
3465 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
3466 // The inverted mask is sign-extended
3467 andptr(possibly_non_local, inverted_mask);
3468 }
3469
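// Resolve a jobject in 'value' to the oop it refers to, handling local,
// global and weak-global JNI handles; a null jobject is passed through
// unchanged. 'tmp' is used as a temporary register.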
3470 void MacroAssembler::resolve_jobject(Register value,
3471 Register tmp) {
3472 Register thread = r15_thread;
3473 assert_different_registers(value, thread, tmp);
3474 Label done, tagged, weak_tagged;
3475 testptr(value, value);
3476 jcc(Assembler::zero, done); // Use null as-is.
3477 testptr(value, JNIHandles::tag_mask); // Test for tag.
3478 jcc(Assembler::notZero, tagged);
3479
3480 // Resolve local handle
3481 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
3482 verify_oop(value);
3483 jmp(done);
3484
3485 bind(tagged);
3486 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
3487 jcc(Assembler::notZero, weak_tagged);
3488
3489 // Resolve global handle
3490 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3491 verify_oop(value);
3492 jmp(done);
3493
3494 bind(weak_tagged);
3495 // Resolve jweak.
3496 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3497 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
3498 verify_oop(value);
3499
3500 bind(done);
3501 }
3502
3503 void MacroAssembler::resolve_global_jobject(Register value,
3504 Register tmp) {
3505 Register thread = r15_thread;
3506 assert_different_registers(value, thread, tmp);
3507 Label done;
3508
3509 testptr(value, value);
3510 jcc(Assembler::zero, done); // Use null as-is.
3511
3512 #ifdef ASSERT
3513 {
3514 Label valid_global_tag;
3515 testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
3516 jcc(Assembler::notZero, valid_global_tag);
3517 stop("non global jobject using resolve_global_jobject");
3518 bind(valid_global_tag);
3519 }
3520 #endif
3521
3522 // Resolve global handle
3523 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3524 verify_oop(value);
3525
3526 bind(done);
3527 }
3528
3529 void MacroAssembler::subptr(Register dst, int32_t imm32) {
3530 subq(dst, imm32);
3531 }
3532
// Force generation of a 4-byte immediate value even if it fits into 8 bits
3534 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
3535 subq_imm32(dst, imm32);
3536 }
3537
3538 void MacroAssembler::subptr(Register dst, Register src) {
3539 subq(dst, src);
3540 }
3541
3542 // C++ bool manipulation
3543 void MacroAssembler::testbool(Register dst) {
3544 if(sizeof(bool) == 1)
3545 testb(dst, 0xff);
3546 else if(sizeof(bool) == 2) {
3547 // testw implementation needed for two byte bools
3548 ShouldNotReachHere();
3549 } else if(sizeof(bool) == 4)
3550 testl(dst, dst);
3551 else
3552 // unsupported
3553 ShouldNotReachHere();
3554 }
3555
3556 void MacroAssembler::testptr(Register dst, Register src) {
3557 testq(dst, src);
3558 }
3559
3560 // Object / value buffer allocation...
3561 //
3562 // Kills klass and rsi on LP64
3563 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
3564 Register t1, Register t2,
3565 bool clear_fields, Label& alloc_failed)
3566 {
3567 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
3568 Register layout_size = t1;
3569 assert(new_obj == rax, "needs to be rax");
3570 assert_different_registers(klass, new_obj, t1, t2);
3571
3572 // get instance_size in InstanceKlass (scaled to a count of bytes)
3573 movl(layout_size, Address(klass, Klass::layout_helper_offset()));
3574 // test to see if it is malformed in some way
3575 testl(layout_size, Klass::_lh_instance_slow_path_bit);
3576 jcc(Assembler::notZero, slow_case_no_pop);
3577
3578 // Allocate the instance:
3579 // If TLAB is enabled:
3580 // Try to allocate in the TLAB.
3581 // If fails, go to the slow path.
3582 // Else If inline contiguous allocations are enabled:
3583 // Try to allocate in eden.
3584 // If fails due to heap end, go to slow path.
3585 //
3586 // If TLAB is enabled OR inline contiguous is enabled:
3587 // Initialize the allocation.
3588 // Exit.
3589 //
3590 // Go to slow path.
3591
3592 push(klass);
3593 if (UseTLAB) {
3594 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
3595 if (ZeroTLAB || (!clear_fields)) {
// the fields are already zeroed (ZeroTLAB) or the caller will initialize them itself
3597 jmp(initialize_header);
3598 } else {
3599 // initialize both the header and fields
3600 jmp(initialize_object);
3601 }
3602 } else {
3603 jmp(slow_case);
3604 }
3605
// If UseTLAB is true, the object was allocated above and still needs to be initialized.
// Otherwise, this block is skipped and we fall through to the slow path.
3608 if (UseTLAB) {
3609 if (clear_fields) {
3610 // The object is initialized before the header. If the object size is
3611 // zero, go directly to the header initialization.
3612 bind(initialize_object);
3613 if (UseCompactObjectHeaders) {
3614 assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
3615 decrement(layout_size, oopDesc::base_offset_in_bytes());
3616 } else {
3617 decrement(layout_size, sizeof(oopDesc));
3618 }
3619 jcc(Assembler::zero, initialize_header);
3620
3621 // Initialize topmost object field, divide size by 8, check if odd and
3622 // test if zero.
3623 Register zero = klass;
3624 xorl(zero, zero); // use zero reg to clear memory (shorter code)
shrl(layout_size, LogBytesPerLong); // divide by 8 (BytesPerLong) and set carry flag if bit 2 was set
3626
3627 #ifdef ASSERT
3628 // make sure instance_size was multiple of 8
3629 Label L;
3630 // Ignore partial flag stall after shrl() since it is debug VM
3631 jcc(Assembler::carryClear, L);
3632 stop("object size is not multiple of 2 - adjust this code");
3633 bind(L);
3634 // must be > 0, no extra check needed here
3635 #endif
3636
3637 // initialize remaining object fields: instance_size was a multiple of 8
3638 {
3639 Label loop;
3640 bind(loop);
3641 int header_size_bytes = oopDesc::header_size() * HeapWordSize;
3642 assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
3643 movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
3644 decrement(layout_size);
3645 jcc(Assembler::notZero, loop);
3646 }
3647 } // clear_fields
3648
3649 // initialize object header only.
3650 bind(initialize_header);
3651 if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
3652 pop(klass);
3653 Register mark_word = t2;
3654 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
3655 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
3656 } else {
3657 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
3658 (intptr_t)markWord::prototype().value()); // header
3659 pop(klass); // get saved klass back in the register.
3660 }
3661 if (!UseCompactObjectHeaders) {
3662 xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
3663 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops
3664 movptr(t2, klass); // preserve klass
3665 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed
3666 }
3667 jmp(done);
3668 }
3669
3670 bind(slow_case);
3671 pop(klass);
3672 bind(slow_case_no_pop);
3673 jmp(alloc_failed);
3674
3675 bind(done);
3676 }
3677
3678 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
3679 void MacroAssembler::tlab_allocate(Register obj,
3680 Register var_size_in_bytes,
3681 int con_size_in_bytes,
3682 Register t1,
3683 Register t2,
3684 Label& slow_case) {
3685 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3686 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
3687 }
3688
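// Set of general-purpose registers that are killed across a call under the
// native ABI (rsi/rdi are callee-saved on Windows; the APX extended registers
// are included only when UseAPX is enabled).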
3689 RegSet MacroAssembler::call_clobbered_gp_registers() {
3690 RegSet regs;
3691 regs += RegSet::of(rax, rcx, rdx);
3692 #ifndef _WINDOWS
3693 regs += RegSet::of(rsi, rdi);
3694 #endif
3695 regs += RegSet::range(r8, r11);
3696 if (UseAPX) {
3697 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
3698 }
3699 return regs;
3700 }
3701
3702 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
3703 int num_xmm_registers = XMMRegister::available_xmm_registers();
3704 #if defined(_WINDOWS)
3705 XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
3706 if (num_xmm_registers > 16) {
3707 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
3708 }
3709 return result;
3710 #else
3711 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
3712 #endif
3713 }
3714
3715 // C1 only ever uses the first double/float of the XMM register.
3716 static int xmm_save_size() { return sizeof(double); }
3717
3718 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3719 masm->movdbl(Address(rsp, offset), reg);
3720 }
3721
3722 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3723 masm->movdbl(reg, Address(rsp, offset));
3724 }
3725
3726 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
3727 bool save_fpu, int& gp_area_size, int& xmm_area_size) {
3728
3729 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
3730 StackAlignmentInBytes);
3731 xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
3732
3733 return gp_area_size + xmm_area_size;
3734 }
3735
3736 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
3737 block_comment("push_call_clobbered_registers start");
3738 // Regular registers
3739 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
3740
3741 int gp_area_size;
3742 int xmm_area_size;
3743 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
3744 gp_area_size, xmm_area_size);
3745 subptr(rsp, total_save_size);
3746
3747 push_set(gp_registers_to_push, 0);
3748
3749 if (save_fpu) {
3750 push_set(call_clobbered_xmm_registers(), gp_area_size);
3751 }
3752
3753 block_comment("push_call_clobbered_registers end");
3754 }
3755
3756 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
3757 block_comment("pop_call_clobbered_registers start");
3758
3759 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
3760
3761 int gp_area_size;
3762 int xmm_area_size;
3763 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
3764 gp_area_size, xmm_area_size);
3765
3766 if (restore_fpu) {
3767 pop_set(call_clobbered_xmm_registers(), gp_area_size);
3768 }
3769
3770 pop_set(gp_registers_to_pop, 0);
3771
3772 addptr(rsp, total_save_size);
3773
3774 vzeroupper();
3775
3776 block_comment("pop_call_clobbered_registers end");
3777 }
3778
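// Spill each XMM register in 'set' into stack space already reserved by the
// caller, starting at [rsp + offset] and advancing by xmm_save_size() per register.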
3779 void MacroAssembler::push_set(XMMRegSet set, int offset) {
3780 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
3781 int spill_offset = offset;
3782
3783 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
3784 save_xmm_register(this, spill_offset, *it);
3785 spill_offset += xmm_save_size();
3786 }
3787 }
3788
3789 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
3790 int restore_size = set.size() * xmm_save_size();
3791 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
3792
3793 int restore_offset = offset + restore_size - xmm_save_size();
3794
3795 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
3796 restore_xmm_register(this, restore_offset, *it);
3797 restore_offset -= xmm_save_size();
3798 }
3799 }
3800
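// Spill each GP register in 'set' to the stack. With offset == -1 the needed
// (stack-aligned) space is reserved here; otherwise the caller has already
// reserved it and 'offset' is the starting byte offset from rsp.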
3801 void MacroAssembler::push_set(RegSet set, int offset) {
3802 int spill_offset;
3803 if (offset == -1) {
3804 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3805 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
3806 subptr(rsp, aligned_size);
3807 spill_offset = 0;
3808 } else {
3809 spill_offset = offset;
3810 }
3811
3812 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
3813 movptr(Address(rsp, spill_offset), *it);
3814 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3815 }
3816 }
3817
3818 void MacroAssembler::pop_set(RegSet set, int offset) {
3819
3820 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3821 int restore_size = set.size() * gp_reg_size;
3822 int aligned_size = align_up(restore_size, StackAlignmentInBytes);
3823
3824 int restore_offset;
3825 if (offset == -1) {
3826 restore_offset = restore_size - gp_reg_size;
3827 } else {
3828 restore_offset = offset + restore_size - gp_reg_size;
3829 }
3830 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
3831 movptr(*it, Address(rsp, restore_offset));
3832 restore_offset -= gp_reg_size;
3833 }
3834
3835 if (offset == -1) {
3836 addptr(rsp, aligned_size);
3837 }
3838 }
3839
// Preserves the contents of address; destroys the contents of length_in_bytes and temp.
3841 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
3842 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
3843 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
3844 Label done;
3845
3846 testptr(length_in_bytes, length_in_bytes);
3847 jcc(Assembler::zero, done);
3848
// convert the byte count into a word count; the loop below clears one word per iteration
3850 // note: for the remaining code to work, index must be a multiple of BytesPerWord
3851 #ifdef ASSERT
3852 {
3853 Label L;
3854 testptr(length_in_bytes, BytesPerWord - 1);
3855 jcc(Assembler::zero, L);
3856 stop("length must be a multiple of BytesPerWord");
3857 bind(L);
3858 }
3859 #endif
3860 Register index = length_in_bytes;
3861 xorptr(temp, temp); // use _zero reg to clear memory (shorter code)
3862 if (UseIncDec) {
shrptr(index, 3); // divide by 8 (BytesPerWord)
3864 } else {
3865 shrptr(index, 2); // use 2 instructions to avoid partial flag stall
3866 shrptr(index, 1);
3867 }
3868
// clear the remaining words: index now holds the word count
3870 {
3871 Label loop;
3872 bind(loop);
3873 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
3874 decrement(index);
3875 jcc(Assembler::notZero, loop);
3876 }
3877
3878 bind(done);
3879 }
3880
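// Compute, in 'layout_info', the address of the InlineLayoutInfo entry for
// field 'index' of 'holder_klass'. Clobbers 'index'.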
3881 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
3882 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
3883 #ifdef ASSERT
3884 {
3885 Label done;
3886 cmpptr(layout_info, 0);
3887 jcc(Assembler::notEqual, done);
3888 stop("inline_layout_info_array is null");
3889 bind(done);
3890 }
3891 #endif
3892
3893 InlineLayoutInfo array[2];
3894 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
3895 if (is_power_of_2(size)) {
3896 shll(index, log2i_exact(size)); // Scale index by power of 2
3897 } else {
3898 imull(index, index, size); // Scale the index to be the entry index * array_element_size
3899 }
3900 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
3901 }
3902
3903 // Look up the method for a megamorphic invokeinterface call.
3904 // The target method is determined by <intf_klass, itable_index>.
3905 // The receiver klass is in recv_klass.
3906 // On success, the result will be in method_result, and execution falls through.
3907 // On failure, execution transfers to the given label.
3908 void MacroAssembler::lookup_interface_method(Register recv_klass,
3909 Register intf_klass,
3910 RegisterOrConstant itable_index,
3911 Register method_result,
3912 Register scan_temp,
3913 Label& L_no_such_interface,
3914 bool return_method) {
3915 assert_different_registers(recv_klass, intf_klass, scan_temp);
3916 assert_different_registers(method_result, intf_klass, scan_temp);
3917 assert(recv_klass != method_result || !return_method,
3918 "recv_klass can be destroyed when method isn't needed");
3919
3920 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
3921 "caller must use same register for non-constant itable index as for method");
3922
3923 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
3924 int vtable_base = in_bytes(Klass::vtable_start_offset());
3925 int itentry_off = in_bytes(itableMethodEntry::method_offset());
3926 int scan_step = itableOffsetEntry::size() * wordSize;
3927 int vte_size = vtableEntry::size_in_bytes();
3928 Address::ScaleFactor times_vte_scale = Address::times_ptr;
3929 assert(vte_size == wordSize, "else adjust times_vte_scale");
3930
3931 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
3932
3933 // Could store the aligned, prescaled offset in the klass.
3934 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
3935
3936 if (return_method) {
3937 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
3938 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
3939 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
3940 }
3941
3942 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
3943 // if (scan->interface() == intf) {
3944 // result = (klass + scan->offset() + itable_index);
3945 // }
3946 // }
3947 Label search, found_method;
3948
3949 for (int peel = 1; peel >= 0; peel--) {
3950 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
3951 cmpptr(intf_klass, method_result);
3952
3953 if (peel) {
3954 jccb(Assembler::equal, found_method);
3955 } else {
3956 jccb(Assembler::notEqual, search);
3957 // (invert the test to fall through to found_method...)
3958 }
3959
3960 if (!peel) break;
3961
3962 bind(search);
3963
3964 // Check that the previous entry is non-null. A null entry means that
3965 // the receiver class doesn't implement the interface, and wasn't the
3966 // same as when the caller was compiled.
3967 testptr(method_result, method_result);
3968 jcc(Assembler::zero, L_no_such_interface);
3969 addptr(scan_temp, scan_step);
3970 }
3971
3972 bind(found_method);
3973
3974 if (return_method) {
3975 // Got a hit.
3976 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
3977 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
3978 }
3979 }
3980
3981 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
3982 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
3983 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
3984 // The target method is determined by <holder_klass, itable_index>.
3985 // The receiver klass is in recv_klass.
3986 // On success, the result will be in method_result, and execution falls through.
3987 // On failure, execution transfers to the given label.
3988 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
3989 Register holder_klass,
3990 Register resolved_klass,
3991 Register method_result,
3992 Register scan_temp,
3993 Register temp_reg2,
3994 Register receiver,
3995 int itable_index,
3996 Label& L_no_such_interface) {
3997 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
3998 Register temp_itbl_klass = method_result;
Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass as a temp when no second temp register is supplied
4000
4001 int vtable_base = in_bytes(Klass::vtable_start_offset());
4002 int itentry_off = in_bytes(itableMethodEntry::method_offset());
4003 int scan_step = itableOffsetEntry::size() * wordSize;
4004 int vte_size = vtableEntry::size_in_bytes();
4005 int ioffset = in_bytes(itableOffsetEntry::interface_offset());
4006 int ooffset = in_bytes(itableOffsetEntry::offset_offset());
4007 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4008 assert(vte_size == wordSize, "adjust times_vte_scale");
4009
4010 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
4011
4012 // temp_itbl_klass = recv_klass.itable[0]
4013 // scan_temp = &recv_klass.itable[0] + step
4014 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
4015 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
4016 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
4017 xorptr(temp_reg, temp_reg);
4018
4019 // Initial checks:
4020 // - if (holder_klass != resolved_klass), go to "scan for resolved"
4021 // - if (itable[0] == 0), no such interface
4022 // - if (itable[0] == holder_klass), shortcut to "holder found"
4023 cmpptr(holder_klass, resolved_klass);
4024 jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
4025 testptr(temp_itbl_klass, temp_itbl_klass);
4026 jccb(Assembler::zero, L_no_such_interface);
4027 cmpptr(holder_klass, temp_itbl_klass);
4028 jccb(Assembler::equal, L_holder_found);
4029
4030 // Loop: Look for holder_klass record in itable
4031 // do {
4032 // tmp = itable[index];
4033 // index += step;
4034 // if (tmp == holder_klass) {
4035 // goto L_holder_found; // Found!
4036 // }
4037 // } while (tmp != 0);
4038 // goto L_no_such_interface // Not found.
4039 Label L_scan_holder;
4040 bind(L_scan_holder);
4041 movptr(temp_itbl_klass, Address(scan_temp, 0));
4042 addptr(scan_temp, scan_step);
4043 cmpptr(holder_klass, temp_itbl_klass);
4044 jccb(Assembler::equal, L_holder_found);
4045 testptr(temp_itbl_klass, temp_itbl_klass);
4046 jccb(Assembler::notZero, L_scan_holder);
4047
4048 jmpb(L_no_such_interface);
4049
4050 // Loop: Look for resolved_class record in itable
4051 // do {
4052 // tmp = itable[index];
4053 // index += step;
4054 // if (tmp == holder_klass) {
4055 // // Also check if we have met a holder klass
4056 // holder_tmp = itable[index-step-ioffset];
4057 // }
4058 // if (tmp == resolved_klass) {
4059 // goto L_resolved_found; // Found!
4060 // }
4061 // } while (tmp != 0);
4062 // goto L_no_such_interface // Not found.
4063 //
4064 Label L_loop_scan_resolved;
4065 bind(L_loop_scan_resolved);
4066 movptr(temp_itbl_klass, Address(scan_temp, 0));
4067 addptr(scan_temp, scan_step);
4068 bind(L_loop_scan_resolved_entry);
4069 cmpptr(holder_klass, temp_itbl_klass);
4070 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4071 cmpptr(resolved_klass, temp_itbl_klass);
4072 jccb(Assembler::equal, L_resolved_found);
4073 testptr(temp_itbl_klass, temp_itbl_klass);
4074 jccb(Assembler::notZero, L_loop_scan_resolved);
4075
4076 jmpb(L_no_such_interface);
4077
4078 Label L_ready;
4079
4080 // See if we already have a holder klass. If not, go and scan for it.
4081 bind(L_resolved_found);
4082 testptr(temp_reg, temp_reg);
4083 jccb(Assembler::zero, L_scan_holder);
4084 jmpb(L_ready);
4085
4086 bind(L_holder_found);
4087 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4088
4089 // Finally, temp_reg contains holder_klass vtable offset
4090 bind(L_ready);
4091 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
if (temp_reg2 == noreg) { // recv_klass was used as the temp above, so reload the klass from the receiver
4093 load_klass(scan_temp, receiver, noreg);
4094 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4095 } else {
4096 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4097 }
4098 }
4099
4100
// Virtual method calling: load the Method* at the given vtable index of recv_klass into method_result.
4102 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4103 RegisterOrConstant vtable_index,
4104 Register method_result) {
4105 const ByteSize base = Klass::vtable_start_offset();
4106 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4107 Address vtable_entry_addr(recv_klass,
4108 vtable_index, Address::times_ptr,
4109 base + vtableEntry::method_offset());
4110 movptr(method_result, vtable_entry_addr);
4111 }
4112
4113
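// Combined fast-path and slow-path subtype check. Jumps to L_success if
// sub_klass is a subtype of super_klass; otherwise falls through (the failure
// label is bound at the end).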
4114 void MacroAssembler::check_klass_subtype(Register sub_klass,
4115 Register super_klass,
4116 Register temp_reg,
4117 Label& L_success) {
4118 Label L_failure;
4119 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
4120 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
4121 bind(L_failure);
4122 }
4123
4124
4125 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4126 Register super_klass,
4127 Register temp_reg,
4128 Label* L_success,
4129 Label* L_failure,
4130 Label* L_slow_path,
4131 RegisterOrConstant super_check_offset) {
4132 assert_different_registers(sub_klass, super_klass, temp_reg);
4133 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4134 if (super_check_offset.is_register()) {
4135 assert_different_registers(sub_klass, super_klass,
4136 super_check_offset.as_register());
4137 } else if (must_load_sco) {
4138 assert(temp_reg != noreg, "supply either a temp or a register offset");
4139 }
4140
4141 Label L_fallthrough;
4142 int label_nulls = 0;
4143 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4144 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4145 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
4146 assert(label_nulls <= 1, "at most one null in the batch");
4147
4148 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4149 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4150 Address super_check_offset_addr(super_klass, sco_offset);
4151
4152 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4153 // range of a jccb. If this routine grows larger, reconsider at
4154 // least some of these.
4155 #define local_jcc(assembler_cond, label) \
4156 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4157 else jcc( assembler_cond, label) /*omit semi*/
4158
4159 // Hacked jmp, which may only be used just before L_fallthrough.
4160 #define final_jmp(label) \
4161 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4162 else jmp(label) /*omit semi*/
4163
4164 // If the pointers are equal, we are done (e.g., String[] elements).
4165 // This self-check enables sharing of secondary supertype arrays among
4166 // non-primary types such as array-of-interface. Otherwise, each such
4167 // type would need its own customized SSA.
4168 // We move this check to the front of the fast path because many
4169 // type checks are in fact trivially successful in this manner,
4170 // so we get a nicely predicted branch right at the start of the check.
4171 cmpptr(sub_klass, super_klass);
4172 local_jcc(Assembler::equal, *L_success);
4173
4174 // Check the supertype display:
4175 if (must_load_sco) {
4176 // Positive movl does right thing on LP64.
4177 movl(temp_reg, super_check_offset_addr);
4178 super_check_offset = RegisterOrConstant(temp_reg);
4179 }
4180 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4181 cmpptr(super_klass, super_check_addr); // load displayed supertype
4182
4183 // This check has worked decisively for primary supers.
4184 // Secondary supers are sought in the super_cache ('super_cache_addr').
4185 // (Secondary supers are interfaces and very deeply nested subtypes.)
4186 // This works in the same check above because of a tricky aliasing
4187 // between the super_cache and the primary super display elements.
4188 // (The 'super_check_addr' can address either, as the case requires.)
4189 // Note that the cache is updated below if it does not help us find
4190 // what we need immediately.
4191 // So if it was a primary super, we can just fail immediately.
4192 // Otherwise, it's the slow path for us (no success at this point).
4193
4194 if (super_check_offset.is_register()) {
4195 local_jcc(Assembler::equal, *L_success);
4196 cmpl(super_check_offset.as_register(), sc_offset);
4197 if (L_failure == &L_fallthrough) {
4198 local_jcc(Assembler::equal, *L_slow_path);
4199 } else {
4200 local_jcc(Assembler::notEqual, *L_failure);
4201 final_jmp(*L_slow_path);
4202 }
4203 } else if (super_check_offset.as_constant() == sc_offset) {
4204 // Need a slow path; fast failure is impossible.
4205 if (L_slow_path == &L_fallthrough) {
4206 local_jcc(Assembler::equal, *L_success);
4207 } else {
4208 local_jcc(Assembler::notEqual, *L_slow_path);
4209 final_jmp(*L_success);
4210 }
4211 } else {
4212 // No slow path; it's a fast decision.
4213 if (L_failure == &L_fallthrough) {
4214 local_jcc(Assembler::equal, *L_success);
4215 } else {
4216 local_jcc(Assembler::notEqual, *L_failure);
4217 final_jmp(*L_success);
4218 }
4219 }
4220
4221 bind(L_fallthrough);
4222
4223 #undef local_jcc
4224 #undef final_jmp
4225 }
4226
4227
4228 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
4229 Register super_klass,
4230 Register temp_reg,
4231 Register temp2_reg,
4232 Label* L_success,
4233 Label* L_failure,
4234 bool set_cond_codes) {
4235 assert_different_registers(sub_klass, super_klass, temp_reg);
4236 if (temp2_reg != noreg)
4237 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
4238 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
4239
4240 Label L_fallthrough;
4241 int label_nulls = 0;
4242 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4243 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4244 assert(label_nulls <= 1, "at most one null in the batch");
4245
4246 // a couple of useful fields in sub_klass:
4247 int ss_offset = in_bytes(Klass::secondary_supers_offset());
4248 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4249 Address secondary_supers_addr(sub_klass, ss_offset);
4250 Address super_cache_addr( sub_klass, sc_offset);
4251
4252 // Do a linear scan of the secondary super-klass chain.
4253 // This code is rarely used, so simplicity is a virtue here.
4254 // The repne_scan instruction uses fixed registers, which we must spill.
4255 // Don't worry too much about pre-existing connections with the input regs.
4256
4257 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
4258 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
4259
4260 // Get super_klass value into rax (even if it was in rdi or rcx).
4261 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
4262 if (super_klass != rax) {
4263 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
4264 mov(rax, super_klass);
4265 }
4266 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
4267 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
4268
4269 #ifndef PRODUCT
4270 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
4271 ExternalAddress pst_counter_addr((address) pst_counter);
4272 lea(rcx, pst_counter_addr);
4273 incrementl(Address(rcx, 0));
4274 #endif //PRODUCT
4275
4276 // We will consult the secondary-super array.
4277 movptr(rdi, secondary_supers_addr);
4278 // Load the array length. (Positive movl does right thing on LP64.)
4279 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
4280 // Skip to start of data.
4281 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
4282
4283 // Scan RCX words at [RDI] for an occurrence of RAX.
4284 // Set NZ/Z based on last compare.
4285 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
4286 // not change flags (only scas instruction which is repeated sets flags).
4287 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
4288
4289 testptr(rax,rax); // Set Z = 0
4290 repne_scan();
4291
4292 // Unspill the temp. registers:
4293 if (pushed_rdi) pop(rdi);
4294 if (pushed_rcx) pop(rcx);
4295 if (pushed_rax) pop(rax);
4296
4297 if (set_cond_codes) {
4298 // Special hack for the AD files: rdi is guaranteed non-zero.
4299 assert(!pushed_rdi, "rdi must be left non-null");
4300 // Also, the condition codes are properly set Z/NZ on succeed/failure.
4301 }
4302
4303 if (L_failure == &L_fallthrough)
4304 jccb(Assembler::notEqual, *L_failure);
4305 else jcc(Assembler::notEqual, *L_failure);
4306
4307 // Success. Cache the super we found and proceed in triumph.
4308 movptr(super_cache_addr, super_klass);
4309
4310 if (L_success != &L_fallthrough) {
4311 jmp(*L_success);
4312 }
4313
4314 #undef IS_A_TEMP
4315
4316 bind(L_fallthrough);
4317 }
4318
4319 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4320 Register super_klass,
4321 Register temp_reg,
4322 Register temp2_reg,
4323 Label* L_success,
4324 Label* L_failure,
4325 bool set_cond_codes) {
4326 assert(set_cond_codes == false, "must be false on 64-bit x86");
4327 check_klass_subtype_slow_path
4328 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
4329 L_success, L_failure);
4330 }
4331
4332 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4333 Register super_klass,
4334 Register temp_reg,
4335 Register temp2_reg,
4336 Register temp3_reg,
4337 Register temp4_reg,
4338 Label* L_success,
4339 Label* L_failure) {
4340 if (UseSecondarySupersTable) {
4341 check_klass_subtype_slow_path_table
4342 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
4343 L_success, L_failure);
4344 } else {
4345 check_klass_subtype_slow_path_linear
4346 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
4347 }
4348 }
4349
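// If 'r' is noreg, take the next register from 'available_regs' and record it
// in 'regs_to_push' so the caller will save and restore it around its use.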
4350 Register MacroAssembler::allocate_if_noreg(Register r,
4351 RegSetIterator<Register> &available_regs,
4352 RegSet ®s_to_push) {
4353 if (!r->is_valid()) {
4354 r = *available_regs++;
4355 regs_to_push += r;
4356 }
4357 return r;
4358 }
4359
4360 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
4361 Register super_klass,
4362 Register temp_reg,
4363 Register temp2_reg,
4364 Register temp3_reg,
4365 Register result_reg,
4366 Label* L_success,
4367 Label* L_failure) {
4368 // NB! Callers may assume that, when temp2_reg is a valid register,
4369 // this code sets it to a nonzero value.
4370 bool temp2_reg_was_valid = temp2_reg->is_valid();
4371
4372 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
4373
4374 Label L_fallthrough;
4375 int label_nulls = 0;
4376 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4377 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4378 assert(label_nulls <= 1, "at most one null in the batch");
4379
4380 BLOCK_COMMENT("check_klass_subtype_slow_path_table");
4381
4382 RegSetIterator<Register> available_regs
4383 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
4384
4385 RegSet pushed_regs;
4386
4387 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
4388 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
4389 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
4390 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
4391 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
4392
4393 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
4394
4395 {
4396
4397 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
4398 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
4399 subptr(rsp, aligned_size);
4400 push_set(pushed_regs, 0);
4401
4402 lookup_secondary_supers_table_var(sub_klass,
4403 super_klass,
4404 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
4405 cmpq(result_reg, 0);
4406
4407 // Unspill the temp. registers:
4408 pop_set(pushed_regs, 0);
4409 // Increment SP but do not clobber flags.
4410 lea(rsp, Address(rsp, aligned_size));
4411 }
4412
4413 if (temp2_reg_was_valid) {
4414 movq(temp2_reg, 1);
4415 }
4416
4417 jcc(Assembler::notEqual, *L_failure);
4418
4419 if (L_success != &L_fallthrough) {
4420 jmp(*L_success);
4421 }
4422
4423 bind(L_fallthrough);
4424 }
4425
4426 // population_count variant for running without the POPCNT
4427 // instruction, which was introduced with SSE4.2 in 2008.
4428 void MacroAssembler::population_count(Register dst, Register src,
4429 Register scratch1, Register scratch2) {
4430 assert_different_registers(src, scratch1, scratch2);
4431 if (UsePopCountInstruction) {
4432 Assembler::popcntq(dst, src);
4433 } else {
4434 assert_different_registers(src, scratch1, scratch2);
4435 assert_different_registers(dst, scratch1, scratch2);
4436 Label loop, done;
4437
4438 mov(scratch1, src);
4439 // dst = 0;
4440 // while(scratch1 != 0) {
4441 // dst++;
4442 // scratch1 &= (scratch1 - 1);
4443 // }
4444 xorl(dst, dst);
4445 testq(scratch1, scratch1);
4446 jccb(Assembler::equal, done);
4447 {
4448 bind(loop);
4449 incq(dst);
4450 movq(scratch2, scratch1);
4451 decq(scratch2);
4452 andq(scratch1, scratch2);
4453 jccb(Assembler::notEqual, loop);
4454 }
4455 bind(done);
4456 }
4457 #ifdef ASSERT
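// In debug builds, clobber the scratch registers with a recognizable pattern
// so callers cannot silently rely on their contents.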
4458 mov64(scratch1, 0xCafeBabeDeadBeef);
4459 movq(scratch2, scratch1);
4460 #endif
4461 }
4462
4463 // Ensure that the inline code and the stub are using the same registers.
4464 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
4465 do { \
4466 assert(r_super_klass == rax, "mismatch"); \
4467 assert(r_array_base == rbx, "mismatch"); \
4468 assert(r_array_length == rcx, "mismatch"); \
4469 assert(r_array_index == rdx, "mismatch"); \
4470 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \
4471 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \
4472 assert(result == rdi || result == noreg, "mismatch"); \
4473 } while(0)
4474
4475 // Versions of salq and rorq that don't need count to be in rcx
4476
4477 void MacroAssembler::salq(Register dest, Register count) {
4478 if (count == rcx) {
4479 Assembler::salq(dest);
4480 } else {
4481 assert_different_registers(rcx, dest);
4482 xchgq(rcx, count);
4483 Assembler::salq(dest);
4484 xchgq(rcx, count);
4485 }
4486 }
4487
4488 void MacroAssembler::rorq(Register dest, Register count) {
4489 if (count == rcx) {
4490 Assembler::rorq(dest);
4491 } else {
4492 assert_different_registers(rcx, dest);
4493 xchgq(rcx, count);
4494 Assembler::rorq(dest);
4495 xchgq(rcx, count);
4496 }
4497 }
4498
// Table-based secondary-supers lookup (constant slot variant).
//
4501 // At runtime, return 0 in result if r_super_klass is a superclass of
4502 // r_sub_klass, otherwise return nonzero. Use this if you know the
4503 // super_klass_slot of the class you're looking for. This is always
4504 // the case for instanceof and checkcast.
4505 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
4506 Register r_super_klass,
4507 Register temp1,
4508 Register temp2,
4509 Register temp3,
4510 Register temp4,
4511 Register result,
4512 u1 super_klass_slot) {
4513 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4514
4515 Label L_fallthrough, L_success, L_failure;
4516
4517 BLOCK_COMMENT("lookup_secondary_supers_table {");
4518
4519 const Register
4520 r_array_index = temp1,
4521 r_array_length = temp2,
4522 r_array_base = temp3,
4523 r_bitmap = temp4;
4524
4525 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
4526
4527 xorq(result, result); // = 0
4528
4529 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4530 movq(r_array_index, r_bitmap);
4531
4532 // First check the bitmap to see if super_klass might be present. If
4533 // the bit is zero, we are certain that super_klass is not one of
4534 // the secondary supers.
4535 u1 bit = super_klass_slot;
4536 {
// NB: If the count in an x86 shift instruction is 0, the flags are
4538 // not affected, so we do a testq instead.
4539 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
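// e.g. for the 64-entry table, bit == 5 gives shift_count == 58, which moves
// bit 5 of the bitmap into the sign bit (bit 63) tested below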
4540 if (shift_count != 0) {
4541 salq(r_array_index, shift_count);
4542 } else {
4543 testq(r_array_index, r_array_index);
4544 }
4545 }
4546 // We test the MSB of r_array_index, i.e. its sign bit
4547 jcc(Assembler::positive, L_failure);
4548
4549 // Get the first array index that can contain super_klass into r_array_index.
4550 if (bit != 0) {
4551 population_count(r_array_index, r_array_index, temp2, temp3);
4552 } else {
4553 movl(r_array_index, 1);
4554 }
4555 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4556
4557 // We will consult the secondary-super array.
4558 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4559
4560 // We're asserting that the first word in an Array<Klass*> is the
4561 // length, and the second word is the first word of the data. If
4562 // that ever changes, r_array_base will have to be adjusted here.
4563 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4564 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4565
4566 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4567 jccb(Assembler::equal, L_success);
4568
4569 // Is there another entry to check? Consult the bitmap.
4570 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
4571 jccb(Assembler::carryClear, L_failure);
4572
4573 // Linear probe. Rotate the bitmap so that the next bit to test is
4574 // in Bit 1.
4575 if (bit != 0) {
4576 rorq(r_bitmap, bit);
4577 }
4578
4579 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4580 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4581 // Kills: r_array_length.
4582 // Returns: result.
4583 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
4584 // Result (0/1) is in rdi
4585 jmpb(L_fallthrough);
4586
4587 bind(L_failure);
4588 incq(result); // 0 => 1
4589
4590 bind(L_success);
4591 // result = 0;
4592
4593 bind(L_fallthrough);
4594 BLOCK_COMMENT("} lookup_secondary_supers_table");
4595
4596 if (VerifySecondarySupers) {
4597 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4598 temp1, temp2, temp3);
4599 }
4600 }
4601
4602 // At runtime, return 0 in result if r_super_klass is a superclass of
4603 // r_sub_klass, otherwise return nonzero. Use this version of
4604 // lookup_secondary_supers_table() if you don't know ahead of time
4605 // which superclass will be searched for. Used by interpreter and
4606 // runtime stubs. It is larger and has somewhat greater latency than
4607 // the version above, which takes a constant super_klass_slot.
4608 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
4609 Register r_super_klass,
4610 Register temp1,
4611 Register temp2,
4612 Register temp3,
4613 Register temp4,
4614 Register result) {
4615 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4616 assert_different_registers(r_sub_klass, r_super_klass, rcx);
4617 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
4618
4619 Label L_fallthrough, L_success, L_failure;
4620
4621 BLOCK_COMMENT("lookup_secondary_supers_table {");
4622
4623 RegSetIterator<Register> available_regs = (temps - rcx).begin();
4624
4625 // FIXME. Once we are sure that all paths reaching this point really
4626 // do pass rcx as one of our temps we can get rid of the following
4627 // workaround.
4628 assert(temps.contains(rcx), "fix this code");
4629
4630 // We prefer to have our shift count in rcx. If rcx is one of our
4631 // temps, use it for slot. If not, pick any of our temps.
4632 Register slot;
4633 if (!temps.contains(rcx)) {
4634 slot = *available_regs++;
4635 } else {
4636 slot = rcx;
4637 }
4638
4639 const Register r_array_index = *available_regs++;
4640 const Register r_bitmap = *available_regs++;
4641
4642 // The logic above guarantees this property, but we state it here.
4643 assert_different_registers(r_array_index, r_bitmap, rcx);
4644
4645 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4646 movq(r_array_index, r_bitmap);
4647
4648 // First check the bitmap to see if super_klass might be present. If
4649 // the bit is zero, we are certain that super_klass is not one of
4650 // the secondary supers.
4651 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4652 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
4653 salq(r_array_index, slot);
4654
4655 testq(r_array_index, r_array_index);
4656 // We test the MSB of r_array_index, i.e. its sign bit
4657 jcc(Assembler::positive, L_failure);
4658
4659 const Register r_array_base = *available_regs++;
4660
4661 // Get the first array index that can contain super_klass into r_array_index.
4662 // Note: Clobbers r_array_base and slot.
4663 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
4664
4665 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4666
4667 // We will consult the secondary-super array.
4668 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4669
4670 // We're asserting that the first word in an Array<Klass*> is the
4671 // length, and the second word is the first word of the data. If
4672 // that ever changes, r_array_base will have to be adjusted here.
4673 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4674 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4675
4676 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4677 jccb(Assembler::equal, L_success);
4678
4679 // Restore slot to its true value
4680 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4681
4682 // Linear probe. Rotate the bitmap so that the next bit to test is
4683 // in Bit 1.
4684 rorq(r_bitmap, slot);
4685
4686 // Is there another entry to check? Consult the bitmap.
4687 btq(r_bitmap, 1);
4688 jccb(Assembler::carryClear, L_failure);
4689
4690 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4691 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4692 // Kills: r_array_length.
4693 // Returns: result.
4694 lookup_secondary_supers_table_slow_path(r_super_klass,
4695 r_array_base,
4696 r_array_index,
4697 r_bitmap,
4698 /*temp1*/result,
4699 /*temp2*/slot,
4700 &L_success,
4701 nullptr);
4702
4703 bind(L_failure);
4704 movq(result, 1);
4705 jmpb(L_fallthrough);
4706
4707 bind(L_success);
4708 xorq(result, result); // = 0
4709
4710 bind(L_fallthrough);
4711 BLOCK_COMMENT("} lookup_secondary_supers_table");
4712
4713 if (VerifySecondarySupers) {
4714 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4715 temp1, temp2, temp3);
4716 }
4717 }
4718
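// Linear scan of the words addr[count] .. addr[limit - 1] for 'value';
// branches to L_success on a match, otherwise to L_failure (or falls through).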
4719 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
4720 Label* L_success, Label* L_failure) {
4721 Label L_loop, L_fallthrough;
4722 {
4723 int label_nulls = 0;
4724 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4725 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4726 assert(label_nulls <= 1, "at most one null in the batch");
4727 }
4728 bind(L_loop);
4729 cmpq(value, Address(addr, count, Address::times_8));
4730 jcc(Assembler::equal, *L_success);
4731 addl(count, 1);
4732 cmpl(count, limit);
4733 jcc(Assembler::less, L_loop);
4734
4735 if (&L_fallthrough != L_failure) {
4736 jmp(*L_failure);
4737 }
4738 bind(L_fallthrough);
4739 }
4740
4741 // Called by code generated by check_klass_subtype_slow_path
4742 // above. This is called when there is a collision in the hashed
4743 // lookup in the secondary supers array.
4744 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
4745 Register r_array_base,
4746 Register r_array_index,
4747 Register r_bitmap,
4748 Register temp1,
4749 Register temp2,
4750 Label* L_success,
4751 Label* L_failure) {
4752 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
4753
4754 const Register
4755 r_array_length = temp1,
4756 r_sub_klass = noreg,
4757 result = noreg;
4758
4759 Label L_fallthrough;
4760 int label_nulls = 0;
4761 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4762 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4763 assert(label_nulls <= 1, "at most one null in the batch");
4764
4765 // Load the array length.
4766 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4767 // And adjust the array base to point to the data.
4768 // NB! Effectively increments current slot index by 1.
4769 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
4770 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4771
4772 // Linear probe
4773 Label L_huge;
4774
4775 // The bitmap is full to bursting.
4776 // Implicit invariant: BITMAP_FULL implies (length > 0)
4777 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
4778 jcc(Assembler::greater, L_huge);
4779
4780 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
4781 // current slot (at secondary_supers[r_array_index]) has not yet
4782 // been inspected, and r_array_index may be out of bounds if we
4783 // wrapped around the end of the array.
4784
4785 { // This is conventional linear probing, but instead of terminating
4786 // when a null entry is found in the table, we maintain a bitmap
4787 // in which a 0 indicates missing entries.
4788 // The check above guarantees there are 0s in the bitmap, so the loop
4789 // eventually terminates.
4790
4791 xorl(temp2, temp2); // = 0;
4792
4793 Label L_again;
4794 bind(L_again);
4795
4796 // Check for array wraparound.
4797 cmpl(r_array_index, r_array_length);
4798 cmovl(Assembler::greaterEqual, r_array_index, temp2);
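// If the index has run past the last live entry, wrap it back to 0 (temp2 holds zero).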
4799
4800 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4801 jcc(Assembler::equal, *L_success);
4802
4803 // If the next bit in bitmap is zero, we're done.
4804 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
4805 jcc(Assembler::carryClear, *L_failure);
4806
4807 rorq(r_bitmap, 1); // Bits 1/2 => 0/1
4808 addl(r_array_index, 1);
4809
4810 jmp(L_again);
4811 }
4812
4813 { // Degenerate case: more than 64 secondary supers.
4814 // FIXME: We could do something smarter here, maybe a vectorized
4815 // comparison or a binary search, but is that worth any added
4816 // complexity?
4817 bind(L_huge);
4818 xorl(r_array_index, r_array_index); // = 0
4819 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
4820 L_success,
4821 (&L_fallthrough != L_failure ? L_failure : nullptr));
4822
4823 bind(L_fallthrough);
4824 }
4825 }
4826
4827 struct VerifyHelperArguments {
4828 Klass* _super;
4829 Klass* _sub;
4830 intptr_t _linear_result;
4831 intptr_t _table_result;
4832 };
4833
4834 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
4835 Klass::on_secondary_supers_verification_failure(args->_super,
4836 args->_sub,
4837 args->_linear_result,
4838 args->_table_result,
4839 msg);
4840 }
4841
4842 // Make sure that the hashed lookup and a linear scan agree.
4843 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
4844 Register r_super_klass,
4845 Register result,
4846 Register temp1,
4847 Register temp2,
4848 Register temp3) {
4849 const Register
4850 r_array_index = temp1,
4851 r_array_length = temp2,
4852 r_array_base = temp3,
4853 r_bitmap = noreg;
4854
4855 BLOCK_COMMENT("verify_secondary_supers_table {");
4856
4857 Label L_success, L_failure, L_check, L_done;
4858
4859 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4860 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4861 // And adjust the array base to point to the data.
4862 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4863
4864 testl(r_array_length, r_array_length); // array_length == 0?
4865 jcc(Assembler::zero, L_failure);
4866
4867 movl(r_array_index, 0);
4868 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
4869 // fall through to L_failure
4870
4871 const Register linear_result = r_array_index; // reuse temp1
4872
4873 bind(L_failure); // not present
4874 movl(linear_result, 1);
4875 jmp(L_check);
4876
4877 bind(L_success); // present
4878 movl(linear_result, 0);
4879
4880 bind(L_check);
4881 cmpl(linear_result, result);
4882 jcc(Assembler::equal, L_done);
4883
4884 { // To avoid calling convention issues, build a record on the stack
4885 // and pass the pointer to that instead.
4886 push(result);
4887 push(linear_result);
4888 push(r_sub_klass);
4889 push(r_super_klass);
4890 movptr(c_rarg1, rsp);
4891 movptr(c_rarg0, (uintptr_t) "mismatch");
4892 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
4893 should_not_reach_here();
4894 }
4895 bind(L_done);
4896
4897 BLOCK_COMMENT("} verify_secondary_supers_table");
4898 }
4899
4900 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
4901
4902 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
4903 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
4904
4905 Label L_fallthrough;
4906 if (L_fast_path == nullptr) {
4907 L_fast_path = &L_fallthrough;
4908 } else if (L_slow_path == nullptr) {
4909 L_slow_path = &L_fallthrough;
4910 }
4911
4912 // Fast path check: class is fully initialized.
4913 // init_state needs acquire, but x86 is TSO, and so we are already good.
4914 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4915 jcc(Assembler::equal, *L_fast_path);
4916
4917 // Fast path check: current thread is initializer thread
4918 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
4919 if (L_slow_path == &L_fallthrough) {
4920 jcc(Assembler::equal, *L_fast_path);
4921 bind(*L_slow_path);
4922 } else if (L_fast_path == &L_fallthrough) {
4923 jcc(Assembler::notEqual, *L_slow_path);
4924 bind(*L_fast_path);
4925 } else {
4926 Unimplemented();
4927 }
4928 }
4929
4930 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
4931 if (VM_Version::supports_cmov()) {
4932 cmovl(cc, dst, src);
4933 } else {
4934 Label L;
4935 jccb(negate_condition(cc), L);
4936 movl(dst, src);
4937 bind(L);
4938 }
4939 }
4940
4941 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
4942 if (VM_Version::supports_cmov()) {
4943 cmovl(cc, dst, src);
4944 } else {
4945 Label L;
4946 jccb(negate_condition(cc), L);
4947 movl(dst, src);
4948 bind(L);
4949 }
4950 }
4951
4952 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
4953 if (!VerifyOops || VerifyAdapterSharing) {
4954 // The address of the code string below confuses VerifyAdapterSharing
4955 // because it may differ between otherwise equivalent adapters.
4956 return;
4957 }
4958
4959 BLOCK_COMMENT("verify_oop {");
4960 push(rscratch1);
4961 push(rax); // save rax
4962 push(reg); // pass register argument
4963
4964 // Pass register number to verify_oop_subroutine
4965 const char* b = nullptr;
4966 {
4967 ResourceMark rm;
4968 stringStream ss;
4969 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
4970 b = code_string(ss.as_string());
4971 }
4972 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
4973 pushptr(buffer.addr(), rscratch1);
4974
4975 // call indirectly to solve generation ordering problem
4976 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
4977 call(rax);
4978 // Caller pops the arguments (oop, message) and restores rax, r10
4979 BLOCK_COMMENT("} verify_oop");
4980 }
4981
4982 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
4983 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
4984 // Only pcmpeq has dependency-breaking treatment (i.e. the execution can begin without
4985 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog
4986 vpternlogd(dst, 0xFF, dst, dst, vector_len);
4987 } else if (VM_Version::supports_avx()) {
4988 vpcmpeqd(dst, dst, dst, vector_len);
4989 } else {
4990 pcmpeqd(dst, dst);
4991 }
4992 }
4993
4994 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
4995 int extra_slot_offset) {
4996 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
4997 int stackElementSize = Interpreter::stackElementSize;
4998 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
4999 #ifdef ASSERT
5000 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5001 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5002 #endif
5003 Register scale_reg = noreg;
5004 Address::ScaleFactor scale_factor = Address::no_scale;
5005 if (arg_slot.is_constant()) {
5006 offset += arg_slot.as_constant() * stackElementSize;
5007 } else {
5008 scale_reg = arg_slot.as_register();
5009 scale_factor = Address::times(stackElementSize);
5010 }
5011 offset += wordSize; // return PC is on stack
5012 return Address(rsp, scale_reg, scale_factor, offset);
5013 }
5014
5015 // Handle the receiver type profile update given the "recv" klass.
5016 //
5017 // Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
5018 // If there are no matching or claimable receiver entries in RD, updates
5019 // the polymorphic counter.
5020 //
5021 // This code is expected to be run by either the interpreter or JIT-ed code, without
5022 // extra synchronization. For safety, receiver cells are claimed atomically, which
5023 // avoids grossly misrepresenting the profiles under concurrent updates. For speed,
5024 // counter updates are not atomic.
5025 //
5026 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
5027 int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
5028 int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
5029 int poly_count_offset = in_bytes(CounterData::count_offset());
5030 int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
5031 int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
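// receiver_step is the byte distance between consecutive receiver slots;
// receiver_to_count_step is the byte distance from a receiver slot to its count slot.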
5032
5033 // Adjust for MDP offsets. Slots are pointer-sized, so is the global offset.
5034 assert(is_aligned(mdp_offset, BytesPerWord), "sanity");
5035 base_receiver_offset += mdp_offset;
5036 end_receiver_offset += mdp_offset;
5037 poly_count_offset += mdp_offset;
5038
5039 // Scale down to optimize encoding. Slots are pointer-sized.
5040 assert(is_aligned(base_receiver_offset, BytesPerWord), "sanity");
5041 assert(is_aligned(end_receiver_offset, BytesPerWord), "sanity");
5042 assert(is_aligned(poly_count_offset, BytesPerWord), "sanity");
5043 assert(is_aligned(receiver_step, BytesPerWord), "sanity");
5044 assert(is_aligned(receiver_to_count_step, BytesPerWord), "sanity");
5045 base_receiver_offset >>= LogBytesPerWord;
5046 end_receiver_offset >>= LogBytesPerWord;
5047 poly_count_offset >>= LogBytesPerWord;
5048 receiver_step >>= LogBytesPerWord;
5049 receiver_to_count_step >>= LogBytesPerWord;
5050
5051 #ifdef ASSERT
5052 // We are about to walk the MDO slots without asking for offsets.
5053 // Check that our math hits all the right spots.
5054 for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
5055 int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
5056 int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
5057 int offset = base_receiver_offset + receiver_step*c;
5058 int count_offset = offset + receiver_to_count_step;
5059 assert((offset << LogBytesPerWord) == real_recv_offset, "receiver slot math");
5060 assert((count_offset << LogBytesPerWord) == real_count_offset, "receiver count math");
5061 }
5062 int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
5063 assert(poly_count_offset << LogBytesPerWord == real_poly_count_offset, "poly counter math");
5064 #endif
5065
5066 // Corner case: no profile table. Increment poly counter and exit.
5067 if (ReceiverTypeData::row_limit() == 0) {
5068 addptr(Address(mdp, poly_count_offset, Address::times_ptr), DataLayout::counter_increment);
5069 return;
5070 }
5071
5072 Register offset = rscratch1;
5073
5074 Label L_loop_search_receiver, L_loop_search_empty;
5075 Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
5076
5077 // The code here recognizes three major cases:
5078 // A. Fastest: receiver found in the table
5079 // B. Fast: no receiver in the table, and the table is full
5080 // C. Slow: no receiver in the table, free slots in the table
5081 //
5082 // Case A performance is the most important, as well-behaved code ends up
5083 // there, especially with larger TypeProfileWidth. Case B performance is
5084 // important as well: this is where the bulk of code lands for normally megamorphic
5085 // cases. Case C performance is not essential; its job is to deal with installation
5086 // races, so we optimize for code density instead. Case C needs to make sure that receiver
5087 // rows are claimed only once. This guarantees we never overwrite a row for another
5088 // receiver and never duplicate receivers in the list, keeping the profile type-accurate.
5089 //
5090 // It is very tempting to handle these cases in a single loop, and claim the first slot
5091 // without checking the rest of the table. But profiling code should tolerate free slots
5092 // in the table, as class unloading can clear them. After such a cleanup, the receiver
5093 // we need might be _after_ the free slot. Therefore, we need to let at least a full scan
5094 // complete before trying to install into new slots. Splitting the code into several tight
5095 // loops also helpfully optimizes cases A and B.
5096 //
5097 // This code is effectively:
5098 //
5099 // restart:
5100 // // Fastest: receiver is already installed
5101 // for (i = 0; i < receiver_count(); i++) {
5102 // if (receiver(i) == recv) goto found_recv(i);
5103 // }
5104 //
5105 // // Fast: no receiver, but profile is full
5106 // for (i = 0; i < receiver_count(); i++) {
5107 // if (receiver(i) == null) goto found_null(i);
5108 // }
5109 // goto polymorphic
5110 //
5111 // // Slow: try to install receiver
5112 // found_null(i):
5113 // CAS(&receiver(i), null, recv);
5114 // goto restart
5115 //
5116 // polymorphic:
5117 // count++;
5118 // return
5119 //
5120 // found_recv(i):
5121 // *receiver_count(i)++
5122 //
5123
5124 bind(L_restart);
5125
5126 // Fastest: receiver is already installed
5127 movptr(offset, base_receiver_offset);
5128 bind(L_loop_search_receiver);
5129 cmpptr(recv, Address(mdp, offset, Address::times_ptr));
5130 jccb(Assembler::equal, L_found_recv);
5131 addptr(offset, receiver_step);
5132 cmpptr(offset, end_receiver_offset);
5133 jccb(Assembler::notEqual, L_loop_search_receiver);
5134
5135 // Fast: no receiver, but profile is full
5136 movptr(offset, base_receiver_offset);
5137 bind(L_loop_search_empty);
5138 cmpptr(Address(mdp, offset, Address::times_ptr), NULL_WORD);
5139 jccb(Assembler::equal, L_found_empty);
5140 addptr(offset, receiver_step);
5141 cmpptr(offset, end_receiver_offset);
5142 jccb(Assembler::notEqual, L_loop_search_empty);
5143 jmpb(L_polymorphic);
5144
5145 // Slow: try to install receiver
5146 bind(L_found_empty);
5147
5148 // Atomically swing receiver slot: null -> recv.
5149 //
5150 // The update code uses CAS, which wants the RAX register specifically, *and* it needs
5151 // the other important registers untouched, as they form the address. Therefore, we need
5152 // to shift any important register out of RAX into some spare register. Since no register
5153 // is known to be free here, whichever register we pick must first be saved on the stack.
5154
5155 Register spare_reg = noreg;
5156 Register shifted_mdp = mdp;
5157 Register shifted_recv = recv;
5158 if (recv == rax || mdp == rax) {
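// recv and mdp occupy at most two registers, so at least one of rbx/rcx/rdx differs
// from both and can be borrowed after saving it.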
5159 spare_reg = (recv != rbx && mdp != rbx) ? rbx :
5160 (recv != rcx && mdp != rcx) ? rcx :
5161 rdx;
5162 assert_different_registers(mdp, recv, offset, spare_reg);
5163
5164 push(spare_reg);
5165 if (recv == rax) {
5166 movptr(spare_reg, recv);
5167 shifted_recv = spare_reg;
5168 } else {
5169 assert(mdp == rax, "Remaining case");
5170 movptr(spare_reg, mdp);
5171 shifted_mdp = spare_reg;
5172 }
5173 } else {
5174 push(rax);
5175 }
5176
5177 // None of the important registers are in RAX after this shuffle.
5178 assert_different_registers(rax, shifted_mdp, shifted_recv, offset);
5179
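// cmpxchgptr uses RAX as the expected value: zero it so the CAS succeeds only while the slot is still null.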
5180 xorptr(rax, rax);
5181 cmpxchgptr(shifted_recv, Address(shifted_mdp, offset, Address::times_ptr));
5182
5183 // Unshift registers.
5184 if (recv == rax || mdp == rax) {
5185 movptr(rax, spare_reg);
5186 pop(spare_reg);
5187 } else {
5188 pop(rax);
5189 }
5190
5191 // CAS success means the slot now has the receiver we want. CAS failure means
5192 // something had claimed the slot concurrently: it can be the same receiver we want,
5193 // or something else. Since this is a slow path, we can optimize for code density,
5194 // and just restart the search from the beginning.
5195 jmpb(L_restart);
5196
5197 // Counter updates:
5198
5199 // Increment polymorphic counter instead of receiver slot.
5200 bind(L_polymorphic);
5201 movptr(offset, poly_count_offset);
5202 jmpb(L_count_update);
5203
5204 // Found a receiver, convert its slot offset to corresponding count offset.
5205 bind(L_found_recv);
5206 addptr(offset, receiver_to_count_step);
5207
5208 bind(L_count_update);
5209 addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
5210 }
5211
5212 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5213 if (!VerifyOops || VerifyAdapterSharing) {
5214 // The address of the code string below confuses VerifyAdapterSharing
5215 // because it may differ between otherwise equivalent adapters.
5216 return;
5217 }
5218
5219 push(rscratch1);
5220 push(rax); // save rax,
5221 // addr may contain rsp so we will have to adjust it based on the push
5222 // we just did (and on 64 bit we do two pushes)
5223 // NOTE: the 64-bit code previously had a bug here: it did movq(addr, rax), which
5224 // stores rax into addr, the reverse of what was intended.
5225 if (addr.uses(rsp)) {
5226 lea(rax, addr);
5227 pushptr(Address(rax, 2 * BytesPerWord));
5228 } else {
5229 pushptr(addr);
5230 }
5231
5232 // Pass register number to verify_oop_subroutine
5233 const char* b = nullptr;
5234 {
5235 ResourceMark rm;
5236 stringStream ss;
5237 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
5238 b = code_string(ss.as_string());
5239 }
5240 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5241 pushptr(buffer.addr(), rscratch1);
5242
5243 // call indirectly to solve generation ordering problem
5244 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5245 call(rax);
5246 // Caller pops the arguments (addr, message) and restores rax, r10.
5247 }
5248
5249 void MacroAssembler::verify_tlab() {
5250 #ifdef ASSERT
5251 if (UseTLAB && VerifyOops) {
5252 Label next, ok;
5253 Register t1 = rsi;
5254
5255 push(t1);
5256
5257 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5258 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
5259 jcc(Assembler::aboveEqual, next);
5260 STOP("assert(top >= start)");
5261 should_not_reach_here();
5262
5263 bind(next);
5264 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
5265 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5266 jcc(Assembler::aboveEqual, ok);
5267 STOP("assert(top <= end)");
5268 should_not_reach_here();
5269
5270 bind(ok);
5271 pop(t1);
5272 }
5273 #endif
5274 }
5275
5276 class ControlWord {
5277 public:
5278 int32_t _value;
5279
5280 int rounding_control() const { return (_value >> 10) & 3 ; }
5281 int precision_control() const { return (_value >> 8) & 3 ; }
5282 bool precision() const { return ((_value >> 5) & 1) != 0; }
5283 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5284 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5285 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5286 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5287 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5288
5289 void print() const {
5290 // rounding control
5291 const char* rc;
5292 switch (rounding_control()) {
5293 case 0: rc = "round near"; break;
5294 case 1: rc = "round down"; break;
5295 case 2: rc = "round up "; break;
5296 case 3: rc = "chop "; break;
5297 default:
5298 rc = nullptr; // silence compiler warnings
5299 fatal("Unknown rounding control: %d", rounding_control());
5300 };
5301 // precision control
5302 const char* pc;
5303 switch (precision_control()) {
5304 case 0: pc = "24 bits "; break;
5305 case 1: pc = "reserved"; break;
5306 case 2: pc = "53 bits "; break;
5307 case 3: pc = "64 bits "; break;
5308 default:
5309 pc = nullptr; // silence compiler warnings
5310 fatal("Unknown precision control: %d", precision_control());
5311 };
5312 // flags
5313 char f[9];
5314 f[0] = ' ';
5315 f[1] = ' ';
5316 f[2] = (precision ()) ? 'P' : 'p';
5317 f[3] = (underflow ()) ? 'U' : 'u';
5318 f[4] = (overflow ()) ? 'O' : 'o';
5319 f[5] = (zero_divide ()) ? 'Z' : 'z';
5320 f[6] = (denormalized()) ? 'D' : 'd';
5321 f[7] = (invalid ()) ? 'I' : 'i';
5322 f[8] = '\x0';
5323 // output
5324 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5325 }
5326
5327 };
5328
5329 class StatusWord {
5330 public:
5331 int32_t _value;
5332
5333 bool busy() const { return ((_value >> 15) & 1) != 0; }
5334 bool C3() const { return ((_value >> 14) & 1) != 0; }
5335 bool C2() const { return ((_value >> 10) & 1) != 0; }
5336 bool C1() const { return ((_value >> 9) & 1) != 0; }
5337 bool C0() const { return ((_value >> 8) & 1) != 0; }
5338 int top() const { return (_value >> 11) & 7 ; }
5339 bool error_status() const { return ((_value >> 7) & 1) != 0; }
5340 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
5341 bool precision() const { return ((_value >> 5) & 1) != 0; }
5342 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5343 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5344 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5345 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5346 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5347
5348 void print() const {
5349 // condition codes
5350 char c[5];
5351 c[0] = (C3()) ? '3' : '-';
5352 c[1] = (C2()) ? '2' : '-';
5353 c[2] = (C1()) ? '1' : '-';
5354 c[3] = (C0()) ? '0' : '-';
5355 c[4] = '\x0';
5356 // flags
5357 char f[9];
5358 f[0] = (error_status()) ? 'E' : '-';
5359 f[1] = (stack_fault ()) ? 'S' : '-';
5360 f[2] = (precision ()) ? 'P' : '-';
5361 f[3] = (underflow ()) ? 'U' : '-';
5362 f[4] = (overflow ()) ? 'O' : '-';
5363 f[5] = (zero_divide ()) ? 'Z' : '-';
5364 f[6] = (denormalized()) ? 'D' : '-';
5365 f[7] = (invalid ()) ? 'I' : '-';
5366 f[8] = '\x0';
5367 // output
5368 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
5369 }
5370
5371 };
5372
5373 class TagWord {
5374 public:
5375 int32_t _value;
5376
5377 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
5378
5379 void print() const {
5380 printf("%04x", _value & 0xFFFF);
5381 }
5382
5383 };
5384
5385 class FPU_Register {
5386 public:
5387 int32_t _m0;
5388 int32_t _m1;
5389 int16_t _ex;
5390
5391 bool is_indefinite() const {
5392 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5393 }
5394
5395 void print() const {
5396 char sign = (_ex < 0) ? '-' : '+';
5397 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
5398 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
5399 };
5400
5401 };
5402
5403 class FPU_State {
5404 public:
5405 enum {
5406 register_size = 10,
5407 number_of_registers = 8,
5408 register_mask = 7
5409 };
5410
5411 ControlWord _control_word;
5412 StatusWord _status_word;
5413 TagWord _tag_word;
5414 int32_t _error_offset;
5415 int32_t _error_selector;
5416 int32_t _data_offset;
5417 int32_t _data_selector;
5418 int8_t _register[register_size * number_of_registers];
5419
5420 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5421 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
5422
5423 const char* tag_as_string(int tag) const {
5424 switch (tag) {
5425 case 0: return "valid";
5426 case 1: return "zero";
5427 case 2: return "special";
5428 case 3: return "empty";
5429 }
5430 ShouldNotReachHere();
5431 return nullptr;
5432 }
5433
5434 void print() const {
5435 // print computation registers
5436 { int t = _status_word.top();
5437 for (int i = 0; i < number_of_registers; i++) {
5438 int j = (i - t) & register_mask;
5439 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5440 st(j)->print();
5441 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5442 }
5443 }
5444 printf("\n");
5445 // print control registers
5446 printf("ctrl = "); _control_word.print(); printf("\n");
5447 printf("stat = "); _status_word .print(); printf("\n");
5448 printf("tags = "); _tag_word .print(); printf("\n");
5449 }
5450
5451 };
5452
5453 class Flag_Register {
5454 public:
5455 int32_t _value;
5456
5457 bool overflow() const { return ((_value >> 11) & 1) != 0; }
5458 bool direction() const { return ((_value >> 10) & 1) != 0; }
5459 bool sign() const { return ((_value >> 7) & 1) != 0; }
5460 bool zero() const { return ((_value >> 6) & 1) != 0; }
5461 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
5462 bool parity() const { return ((_value >> 2) & 1) != 0; }
5463 bool carry() const { return ((_value >> 0) & 1) != 0; }
5464
5465 void print() const {
5466 // flags
5467 char f[8];
5468 f[0] = (overflow ()) ? 'O' : '-';
5469 f[1] = (direction ()) ? 'D' : '-';
5470 f[2] = (sign ()) ? 'S' : '-';
5471 f[3] = (zero ()) ? 'Z' : '-';
5472 f[4] = (auxiliary_carry()) ? 'A' : '-';
5473 f[5] = (parity ()) ? 'P' : '-';
5474 f[6] = (carry ()) ? 'C' : '-';
5475 f[7] = '\x0';
5476 // output
5477 printf("%08x flags = %s", _value, f);
5478 }
5479
5480 };
5481
5482 class IU_Register {
5483 public:
5484 int32_t _value;
5485
5486 void print() const {
5487 printf("%08x %11d", _value, _value);
5488 }
5489
5490 };
5491
5492 class IU_State {
5493 public:
5494 Flag_Register _eflags;
5495 IU_Register _rdi;
5496 IU_Register _rsi;
5497 IU_Register _rbp;
5498 IU_Register _rsp;
5499 IU_Register _rbx;
5500 IU_Register _rdx;
5501 IU_Register _rcx;
5502 IU_Register _rax;
5503
5504 void print() const {
5505 // computation registers
5506 printf("rax, = "); _rax.print(); printf("\n");
5507 printf("rbx, = "); _rbx.print(); printf("\n");
5508 printf("rcx = "); _rcx.print(); printf("\n");
5509 printf("rdx = "); _rdx.print(); printf("\n");
5510 printf("rdi = "); _rdi.print(); printf("\n");
5511 printf("rsi = "); _rsi.print(); printf("\n");
5512 printf("rbp, = "); _rbp.print(); printf("\n");
5513 printf("rsp = "); _rsp.print(); printf("\n");
5514 printf("\n");
5515 // control registers
5516 printf("flgs = "); _eflags.print(); printf("\n");
5517 }
5518 };
5519
5520
5521 class CPU_State {
5522 public:
5523 FPU_State _fpu_state;
5524 IU_State _iu_state;
5525
5526 void print() const {
5527 printf("--------------------------------------------------\n");
5528 _iu_state .print();
5529 printf("\n");
5530 _fpu_state.print();
5531 printf("--------------------------------------------------\n");
5532 }
5533
5534 };
5535
5536
5537 static void _print_CPU_state(CPU_State* state) {
5538 state->print();
5539 };
5540
5541
5542 void MacroAssembler::print_CPU_state() {
5543 push_CPU_state();
5544 push(rsp); // pass CPU state
5545 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5546 addptr(rsp, wordSize); // discard argument
5547 pop_CPU_state();
5548 }
5549
5550 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
5551 // Either restore the MXCSR register after returning from the JNI Call
5552 // or verify that it wasn't changed (with -Xcheck:jni flag).
5553 if (VM_Version::supports_sse()) {
5554 if (RestoreMXCSROnJNICalls) {
5555 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
5556 } else if (CheckJNICalls) {
5557 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5558 }
5559 }
5560 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5561 vzeroupper();
5562 }
5563
5564 // ((OopHandle)result).resolve();
5565 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
5566 assert_different_registers(result, tmp);
5567
5568 // Only 64 bit platforms support GCs that require a tmp register
5569 // Only IN_HEAP loads require a thread_tmp register
5570 // OopHandle::resolve is an indirection like jobject.
5571 access_load_at(T_OBJECT, IN_NATIVE,
5572 result, Address(result, 0), tmp);
5573 }
5574
5575 // ((WeakHandle)result).resolve();
5576 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
5577 assert_different_registers(rresult, rtmp);
5578 Label resolved;
5579
5580 // A null weak handle resolves to null.
5581 cmpptr(rresult, 0);
5582 jcc(Assembler::equal, resolved);
5583
5584 // Only 64 bit platforms support GCs that require a tmp register
5585 // Only IN_HEAP loads require a thread_tmp register
5586 // WeakHandle::resolve is an indirection like jweak.
5587 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5588 rresult, Address(rresult, 0), rtmp);
5589 bind(resolved);
5590 }
5591
5592 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5593 // get mirror
5594 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5595 load_method_holder(mirror, method);
5596 movptr(mirror, Address(mirror, mirror_offset));
5597 resolve_oop_handle(mirror, tmp);
5598 }
5599
5600 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5601 load_method_holder(rresult, rmethod);
5602 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5603 }
5604
5605 void MacroAssembler::load_method_holder(Register holder, Register method) {
5606 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
5607 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5608 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5609 }
5610
5611 void MacroAssembler::load_metadata(Register dst, Register src) {
5612 if (UseCompactObjectHeaders) {
5613 load_narrow_klass_compact(dst, src);
5614 } else if (UseCompressedClassPointers) {
5615 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5616 } else {
5617 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5618 }
5619 }
5620
5621 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5622 assert(UseCompactObjectHeaders, "expect compact object headers");
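// With compact object headers the narrow Klass* lives in the upper bits of the mark word:
// load the mark word and shift the klass bits down into place.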
5623 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5624 shrq(dst, markWord::klass_shift);
5625 }
5626
5627 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5628 assert_different_registers(src, tmp);
5629 assert_different_registers(dst, tmp);
5630
5631 if (UseCompactObjectHeaders) {
5632 load_narrow_klass_compact(dst, src);
5633 decode_klass_not_null(dst, tmp);
5634 } else if (UseCompressedClassPointers) {
5635 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5636 decode_klass_not_null(dst, tmp);
5637 } else {
5638 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5639 }
5640 }
5641
5642 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
5643 load_klass(dst, src, tmp);
5644 movptr(dst, Address(dst, Klass::prototype_header_offset()));
5645 }
5646
5647 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5648 assert(!UseCompactObjectHeaders, "not with compact headers");
5649 assert_different_registers(src, tmp);
5650 assert_different_registers(dst, tmp);
5651 if (UseCompressedClassPointers) {
5652 encode_klass_not_null(src, tmp);
5653 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5654 } else {
5655 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5656 }
5657 }
5658
5659 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
5660 if (UseCompactObjectHeaders) {
5661 assert(tmp != noreg, "need tmp");
5662 assert_different_registers(klass, obj, tmp);
5663 load_narrow_klass_compact(tmp, obj);
5664 cmpl(klass, tmp);
5665 } else if (UseCompressedClassPointers) {
5666 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5667 } else {
5668 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5669 }
5670 }
5671
5672 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5673 if (UseCompactObjectHeaders) {
5674 assert(tmp2 != noreg, "need tmp2");
5675 assert_different_registers(obj1, obj2, tmp1, tmp2);
5676 load_narrow_klass_compact(tmp1, obj1);
5677 load_narrow_klass_compact(tmp2, obj2);
5678 cmpl(tmp1, tmp2);
5679 } else if (UseCompressedClassPointers) {
5680 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5681 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
5682 } else {
5683 movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5684 cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
5685 }
5686 }
5687
5688 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5689 Register tmp1) {
5690 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5691 decorators = AccessInternal::decorator_fixup(decorators, type);
5692 bool as_raw = (decorators & AS_RAW) != 0;
5693 if (as_raw) {
5694 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
5695 } else {
5696 bs->load_at(this, decorators, type, dst, src, tmp1);
5697 }
5698 }
5699
5700 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5701 Register tmp1, Register tmp2, Register tmp3) {
5702 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5703 decorators = AccessInternal::decorator_fixup(decorators, type);
5704 bool as_raw = (decorators & AS_RAW) != 0;
5705 if (as_raw) {
5706 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5707 } else {
5708 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5709 }
5710 }
5711
5712 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5713 Register inline_layout_info) {
5714 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5715 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5716 }
5717
5718 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5719 movptr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
5720 movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
5721 }
5722
5723 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
5724 // ((address) (void*) o) + vk->payload_offset();
5725 Register offset = (data == oop) ? rscratch1 : data;
5726 payload_offset(inline_klass, offset);
5727 if (data == oop) {
5728 addptr(data, offset);
5729 } else {
5730 lea(data, Address(oop, offset));
5731 }
5732 }
5733
5734 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5735 Register index, Register data) {
5736 assert(index != rcx, "index needs to shift by rcx");
5737 assert_different_registers(array, array_klass, index);
5738 assert_different_registers(rcx, array, index);
5739
5740 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5741 movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
5742
5743 // Klass::layout_helper_log2_element_size(lh)
5744 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5745 shrl(rcx, Klass::_lh_log2_element_size_shift);
5746 andl(rcx, Klass::_lh_log2_element_size_mask);
5747 shlptr(index); // index << rcx
5748
5749 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
5750 }
5751
5752 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5753 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
5754 }
5755
5756 // Doesn't do verification, generates fixed size code
5757 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5758 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
5759 }
5760
5761 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5762 Register tmp2, Register tmp3, DecoratorSet decorators) {
5763 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5764 }
5765
5766 // Used for storing nulls.
5767 void MacroAssembler::store_heap_oop_null(Address dst) {
5768 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5769 }
5770
5771 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5772 assert(!UseCompactObjectHeaders, "Don't use with compact headers");
5773 if (UseCompressedClassPointers) {
5774 // Store to klass gap in destination
5775 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5776 }
5777 }
5778
5779 #ifdef ASSERT
5780 void MacroAssembler::verify_heapbase(const char* msg) {
5781 assert (UseCompressedOops, "should be compressed");
5782 assert (Universe::heap() != nullptr, "java heap should be initialized");
5783 if (CheckCompressedOops) {
5784 Label ok;
5785 ExternalAddress src2(CompressedOops::base_addr());
5786 const bool is_src2_reachable = reachable(src2);
5787 if (!is_src2_reachable) {
5788 push(rscratch1); // cmpptr trashes rscratch1
5789 }
5790 cmpptr(r12_heapbase, src2, rscratch1);
5791 jcc(Assembler::equal, ok);
5792 STOP(msg);
5793 bind(ok);
5794 if (!is_src2_reachable) {
5795 pop(rscratch1);
5796 }
5797 }
5798 }
5799 #endif
5800
5801 // Algorithm must match oop.inline.hpp encode_heap_oop.
5802 void MacroAssembler::encode_heap_oop(Register r) {
5803 #ifdef ASSERT
5804 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5805 #endif
5806 verify_oop_msg(r, "broken oop in encode_heap_oop");
5807 if (CompressedOops::base() == nullptr) {
5808 if (CompressedOops::shift() != 0) {
5809 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5810 shrq(r, LogMinObjAlignmentInBytes);
5811 }
5812 return;
5813 }
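// A null oop must encode to 0: if r is null, substitute the heap base so the subtraction below yields zero.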
5814 testq(r, r);
5815 cmovq(Assembler::equal, r, r12_heapbase);
5816 subq(r, r12_heapbase);
5817 shrq(r, LogMinObjAlignmentInBytes);
5818 }
5819
5820 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5821 #ifdef ASSERT
5822 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5823 if (CheckCompressedOops) {
5824 Label ok;
5825 testq(r, r);
5826 jcc(Assembler::notEqual, ok);
5827 STOP("null oop passed to encode_heap_oop_not_null");
5828 bind(ok);
5829 }
5830 #endif
5831 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5832 if (CompressedOops::base() != nullptr) {
5833 subq(r, r12_heapbase);
5834 }
5835 if (CompressedOops::shift() != 0) {
5836 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5837 shrq(r, LogMinObjAlignmentInBytes);
5838 }
5839 }
5840
5841 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5842 #ifdef ASSERT
5843 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5844 if (CheckCompressedOops) {
5845 Label ok;
5846 testq(src, src);
5847 jcc(Assembler::notEqual, ok);
5848 STOP("null oop passed to encode_heap_oop_not_null2");
5849 bind(ok);
5850 }
5851 #endif
5852 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5853 if (dst != src) {
5854 movq(dst, src);
5855 }
5856 if (CompressedOops::base() != nullptr) {
5857 subq(dst, r12_heapbase);
5858 }
5859 if (CompressedOops::shift() != 0) {
5860 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5861 shrq(dst, LogMinObjAlignmentInBytes);
5862 }
5863 }
5864
5865 void MacroAssembler::decode_heap_oop(Register r) {
5866 #ifdef ASSERT
5867 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5868 #endif
5869 if (CompressedOops::base() == nullptr) {
5870 if (CompressedOops::shift() != 0) {
5871 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5872 shlq(r, LogMinObjAlignmentInBytes);
5873 }
5874 } else {
5875 Label done;
5876 shlq(r, LogMinObjAlignmentInBytes);
5877 jccb(Assembler::equal, done);
5878 addq(r, r12_heapbase);
5879 bind(done);
5880 }
5881 verify_oop_msg(r, "broken oop in decode_heap_oop");
5882 }
5883
5884 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5885 // Note: it will change flags
5886 assert (UseCompressedOops, "should only be used for compressed headers");
5887 assert (Universe::heap() != nullptr, "java heap should be initialized");
5888 // Cannot assert, unverified entry point counts instructions (see .ad file)
5889 // vtableStubs also counts instructions in pd_code_size_limit.
5890 // Also do not verify_oop as this is called by verify_oop.
5891 if (CompressedOops::shift() != 0) {
5892 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5893 shlq(r, LogMinObjAlignmentInBytes);
5894 if (CompressedOops::base() != nullptr) {
5895 addq(r, r12_heapbase);
5896 }
5897 } else {
5898 assert (CompressedOops::base() == nullptr, "sanity");
5899 }
5900 }
5901
5902 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5903 // Note: it will change flags
5904 assert (UseCompressedOops, "should only be used for compressed headers");
5905 assert (Universe::heap() != nullptr, "java heap should be initialized");
5906 // Cannot assert, unverified entry point counts instructions (see .ad file)
5907 // vtableStubs also counts instructions in pd_code_size_limit.
5908 // Also do not verify_oop as this is called by verify_oop.
5909 if (CompressedOops::shift() != 0) {
5910 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5911 if (LogMinObjAlignmentInBytes == Address::times_8) {
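// A shift of 3 lets us fold base + (narrow << 3) into a single lea.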
5912 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5913 } else {
5914 if (dst != src) {
5915 movq(dst, src);
5916 }
5917 shlq(dst, LogMinObjAlignmentInBytes);
5918 if (CompressedOops::base() != nullptr) {
5919 addq(dst, r12_heapbase);
5920 }
5921 }
5922 } else {
5923 assert (CompressedOops::base() == nullptr, "sanity");
5924 if (dst != src) {
5925 movq(dst, src);
5926 }
5927 }
5928 }
5929
5930 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5931 BLOCK_COMMENT("encode_klass_not_null {");
5932 assert_different_registers(r, tmp);
5933 if (CompressedKlassPointers::base() != nullptr) {
5934 if (AOTCodeCache::is_on_for_dump()) {
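// Do not bake the current base in as an immediate; load it from the relocatable external address instead.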
5935 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5936 } else {
5937 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5938 }
5939 subq(r, tmp);
5940 }
5941 if (CompressedKlassPointers::shift() != 0) {
5942 shrq(r, CompressedKlassPointers::shift());
5943 }
5944 BLOCK_COMMENT("} encode_klass_not_null");
5945 }
5946
5947 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5948 BLOCK_COMMENT("encode_and_move_klass_not_null {");
5949 assert_different_registers(src, dst);
5950 if (CompressedKlassPointers::base() != nullptr) {
5951 movptr(dst, -(intptr_t)CompressedKlassPointers::base());
5952 addq(dst, src);
5953 } else {
5954 movptr(dst, src);
5955 }
5956 if (CompressedKlassPointers::shift() != 0) {
5957 shrq(dst, CompressedKlassPointers::shift());
5958 }
5959 BLOCK_COMMENT("} encode_and_move_klass_not_null");
5960 }
5961
5962 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5963 BLOCK_COMMENT("decode_klass_not_null {");
5964 assert_different_registers(r, tmp);
5965 // Note: it will change flags
5966 assert(UseCompressedClassPointers, "should only be used for compressed headers");
5967 // Cannot assert, unverified entry point counts instructions (see .ad file)
5968 // vtableStubs also counts instructions in pd_code_size_limit.
5969 // Also do not verify_oop as this is called by verify_oop.
5970 if (CompressedKlassPointers::shift() != 0) {
5971 shlq(r, CompressedKlassPointers::shift());
5972 }
5973 if (CompressedKlassPointers::base() != nullptr) {
5974 if (AOTCodeCache::is_on_for_dump()) {
5975 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5976 } else {
5977 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5978 }
5979 addq(r, tmp);
5980 }
5981 BLOCK_COMMENT("} decode_klass_not_null");
5982 }
5983
5984 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5985 BLOCK_COMMENT("decode_and_move_klass_not_null {");
5986 assert_different_registers(src, dst);
5987 // Note: it will change flags
5988 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5989 // Cannot assert, unverified entry point counts instructions (see .ad file)
5990 // vtableStubs also counts instructions in pd_code_size_limit.
5991 // Also do not verify_oop as this is called by verify_oop.
5992
5993 if (CompressedKlassPointers::base() == nullptr &&
5994 CompressedKlassPointers::shift() == 0) {
5995 // The best case scenario is that there is no base or shift. Then it is already
5996 // a pointer that needs nothing but a register rename.
5997 movl(dst, src);
5998 } else {
5999 if (CompressedKlassPointers::shift() <= Address::times_8) {
6000 if (CompressedKlassPointers::base() != nullptr) {
6001 movptr(dst, (intptr_t)CompressedKlassPointers::base());
6002 } else {
6003 xorq(dst, dst);
6004 }
6005 if (CompressedKlassPointers::shift() != 0) {
6006 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
6007 leaq(dst, Address(dst, src, Address::times_8, 0));
6008 } else {
6009 addq(dst, src);
6010 }
6011 } else {
6012 if (CompressedKlassPointers::base() != nullptr) {
6013 const intptr_t base_right_shifted =
6014 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
6015 movptr(dst, base_right_shifted);
6016 } else {
6017 xorq(dst, dst);
6018 }
6019 addq(dst, src);
6020 shlq(dst, CompressedKlassPointers::shift());
6021 }
6022 }
6023 BLOCK_COMMENT("} decode_and_move_klass_not_null");
6024 }
6025
6026 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
6027 assert (UseCompressedOops, "should only be used for compressed headers");
6028 assert (Universe::heap() != nullptr, "java heap should be initialized");
6029 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6030 int oop_index = oop_recorder()->find_index(obj);
6031 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6032 mov_narrow_oop(dst, oop_index, rspec);
6033 }
6034
6035 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
6036 assert (UseCompressedOops, "should only be used for compressed headers");
6037 assert (Universe::heap() != nullptr, "java heap should be initialized");
6038 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6039 int oop_index = oop_recorder()->find_index(obj);
6040 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6041 mov_narrow_oop(dst, oop_index, rspec);
6042 }
6043
6044 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
6045 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6046 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6047 int klass_index = oop_recorder()->find_index(k);
6048 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6049 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6050 }
6051
6052 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
6053 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6054 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6055 int klass_index = oop_recorder()->find_index(k);
6056 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6057 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6058 }
6059
6060 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6061 assert (UseCompressedOops, "should only be used for compressed headers");
6062 assert (Universe::heap() != nullptr, "java heap should be initialized");
6063 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6064 int oop_index = oop_recorder()->find_index(obj);
6065 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6066 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6067 }
6068
6069 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6070 assert (UseCompressedOops, "should only be used for compressed headers");
6071 assert (Universe::heap() != nullptr, "java heap should be initialized");
6072 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6073 int oop_index = oop_recorder()->find_index(obj);
6074 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6075 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6076 }
6077
6078 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6079 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6080 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6081 int klass_index = oop_recorder()->find_index(k);
6082 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6083 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6084 }
6085
6086 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6087 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6088 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6089 int klass_index = oop_recorder()->find_index(k);
6090 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6091 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
6092 }
6093
6094 void MacroAssembler::reinit_heapbase() {
6095 if (UseCompressedOops) {
6096 if (Universe::heap() != nullptr) {
6097 if (CompressedOops::base() == nullptr) {
6098 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6099 } else {
6100 mov64(r12_heapbase, (int64_t)CompressedOops::base());
6101 }
6102 } else {
6103 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
6104 }
6105 }
6106 }
6107
6108 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
6109 assert(InlineTypeReturnedAsFields, "Inline types should be returned as fields");
6110 // An inline type might be returned. If fields are in registers we
6111 // need to allocate an inline type instance and initialize it with
6112 // the value of the fields.
6113 Label skip;
6114 // We only need a new buffered inline type if a new one is not returned
6115 testptr(rax, 1);
6116 jcc(Assembler::zero, skip);
6117 int call_offset = -1;
6118
6119 #ifdef _LP64
6120 // The following code is similar to allocate_instance but has some slight differences,
6121 // e.g. the object size is never zero and is sometimes a constant; storing the klass pointer after
6122 // allocating is not necessary if vk != nullptr, etc. allocate_instance is not aware of these.
6123 Label slow_case;
6124 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6125 mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation fails
6126 if (vk != nullptr) {
6127 // Called from C1, where the return type is statically known.
6128 movptr(rbx, (intptr_t)vk->get_InlineKlass());
6129 jint lh = vk->layout_helper();
6130 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6131 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
6132 tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
6133 } else {
6134 jmp(slow_case);
6135 }
6136 } else {
6137 // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
6138 mov(rbx, rax);
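// Clear the low tag bit to recover the raw InlineKlass* pointer.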
6139 andptr(rbx, -2);
6140 if (UseTLAB) {
6141 movl(r14, Address(rbx, Klass::layout_helper_offset()));
6142 testl(r14, Klass::_lh_instance_slow_path_bit);
6143 jcc(Assembler::notZero, slow_case);
6144 tlab_allocate(rax, r14, 0, r13, r14, slow_case);
6145 } else {
6146 jmp(slow_case);
6147 }
6148 }
6149 if (UseTLAB) {
6150 // 2. Initialize buffered inline instance header
6151 Register buffer_obj = rax;
6152 Register klass = rbx;
6153 if (UseCompactObjectHeaders) {
6154 Register mark_word = r13;
6155 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
6156 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
6157 } else {
6158 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
6159 xorl(r13, r13);
6160 store_klass_gap(buffer_obj, r13);
6161 if (vk == nullptr) {
6162 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
6163 mov(r13, klass);
6164 }
6165 store_klass(buffer_obj, klass, rscratch1);
6166 klass = r13;
6167 }
6168 // 3. Initialize its fields with an inline class specific handler
6169 if (vk != nullptr) {
6170 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6171 } else {
6172 movptr(rbx, Address(klass, InlineKlass::adr_members_offset()));
6173 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
6174 call(rbx);
6175 }
6176 jmp(skip);
6177 }
6178 bind(slow_case);
6179 // We failed to allocate a new inline type, fall back to a runtime
6180 // call. Some oop field may be live in some registers but we can't
6181 // tell. That runtime call will take care of preserving them
6182 // across a GC if there's one.
6183 mov(rax, rscratch1);
6184 #endif
6185
6186 if (from_interpreter) {
6187 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
6188 } else {
6189 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
6190 call_offset = offset();
6191 }
6192
6193 bind(skip);
6194 return call_offset;
6195 }
6196
6197 // Move a value between registers/stack slots and update the reg_state
6198 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
6199 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
6200 if (reg_state[to->value()] == reg_written) {
6201 return true; // Already written
6202 }
6203 if (from != to && bt != T_VOID) {
6204 if (reg_state[to->value()] == reg_readonly) {
6205 return false; // Not yet writable
6206 }
6207 if (from->is_reg()) {
6208 if (to->is_reg()) {
6209 if (from->is_XMMRegister()) {
6210 if (bt == T_DOUBLE) {
6211 movdbl(to->as_XMMRegister(), from->as_XMMRegister());
6212 } else {
6213 assert(bt == T_FLOAT, "must be float");
6214 movflt(to->as_XMMRegister(), from->as_XMMRegister());
6215 }
6216 } else {
6217 movq(to->as_Register(), from->as_Register());
6218 }
6219 } else {
6220 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6221 Address to_addr = Address(rsp, st_off);
6222 if (from->is_XMMRegister()) {
6223 if (bt == T_DOUBLE) {
6224 movdbl(to_addr, from->as_XMMRegister());
6225 } else {
6226 assert(bt == T_FLOAT, "must be float");
6227 movflt(to_addr, from->as_XMMRegister());
6228 }
6229 } else {
6230 movq(to_addr, from->as_Register());
6231 }
6232 }
6233 } else {
6234 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
6235 if (to->is_reg()) {
6236 if (to->is_XMMRegister()) {
6237 if (bt == T_DOUBLE) {
6238 movdbl(to->as_XMMRegister(), from_addr);
6239 } else {
6240 assert(bt == T_FLOAT, "must be float");
6241 movflt(to->as_XMMRegister(), from_addr);
6242 }
6243 } else {
6244 movq(to->as_Register(), from_addr);
6245 }
6246 } else {
6247 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6248 movq(r13, from_addr);
6249 movq(Address(rsp, st_off), r13);
6250 }
6251 }
6252 }
6253 // Update register states
6254 reg_state[from->value()] = reg_writable;
6255 reg_state[to->value()] = reg_written;
6256 return true;
6257 }
6258
6259 // Calculate the extra stack space required for packing or unpacking inline
6260 // args and adjust the stack pointer.
6261 //
6262 // This extra stack space takes into account the copy #2 of the return address,
6263 // but NOT the saved RBP or the normal size of the frame (see MacroAssembler::remove_frame
6264 // for notations).
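//
// For illustration (assuming VMRegImpl::stack_slot_size == 4 and StackAlignmentInBytes == 16):
// args_on_stack == 3 gives sp_inc = align_up((3 + 2) * 4, 16) = align_up(20, 16) = 32 bytes.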
6265 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
6266 // Two additional 32-bit stack slots to account for the copy #2 of the return address
6267 int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
6268 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
6269 // Save the return address, adjust the stack (make sure it is properly
6270 // 16-byte aligned) and copy the return address to the new top of the stack.
6271 // The stack will be repaired on return (see MacroAssembler::remove_frame).
6272 assert(sp_inc > 0, "sanity");
6273 pop(r13);
6274 subptr(rsp, sp_inc);
6275 #ifdef ASSERT
6276 movl(Address(rsp, -VMRegImpl::stack_slot_size), badRegWordVal);
6277 movl(Address(rsp, -2 * VMRegImpl::stack_slot_size), badRegWordVal);
6278 subptr(rsp, 2 * VMRegImpl::stack_slot_size);
6279 #else
6280 push(r13);
6281 #endif
6282 return sp_inc;
6283 }
6284
6285 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
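// Returns true once every destination register/stack slot has been written; returns false if some
// destinations are still marked read-only and have to be handled in a later pass.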
6286 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
6287 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
6288 RegState reg_state[]) {
6289 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6290 assert(from->is_valid(), "source must be valid");
6291 bool progress = false;
6292 #ifdef ASSERT
6293 const int start_offset = offset();
6294 #endif
6295
6296 Label L_null, L_notNull;
6297 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
6298 Register tmp1 = r10;
6299 Register tmp2 = r13;
6300 Register fromReg = noreg;
6301 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, true);
6302 bool done = true;
6303 bool mark_done = true;
6304 VMReg toReg;
6305 BasicType bt;
6306 // Check if argument requires a null check
6307 bool null_check = false;
6308 VMReg nullCheckReg;
6309 while (stream.next(nullCheckReg, bt)) {
6310 if (sig->at(stream.sig_index())._offset == -1) {
6311 null_check = true;
6312 break;
6313 }
6314 }
6315 stream.reset(sig_index, to_index);
6316 while (stream.next(toReg, bt)) {
6317 assert(toReg->is_valid(), "destination must be valid");
6318 int idx = (int)toReg->value();
6319 if (reg_state[idx] == reg_readonly) {
6320 if (idx != from->value()) {
6321 mark_done = false;
6322 }
6323 done = false;
6324 continue;
6325 } else if (reg_state[idx] == reg_written) {
6326 continue;
6327 }
6328 assert(reg_state[idx] == reg_writable, "must be writable");
6329 reg_state[idx] = reg_written;
6330 progress = true;
6331
6332 if (fromReg == noreg) {
6333 if (from->is_reg()) {
6334 fromReg = from->as_Register();
6335 } else {
6336 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6337 movq(tmp1, Address(rsp, st_off));
6338 fromReg = tmp1;
6339 }
6340 if (null_check) {
6341 // Nullable inline type argument, emit null check
6342 testptr(fromReg, fromReg);
6343 jcc(Assembler::zero, L_null);
6344 }
6345 }
6346 int off = sig->at(stream.sig_index())._offset;
6347 if (off == -1) {
6348 assert(null_check, "Missing null check");
6349 if (toReg->is_stack()) {
6350 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6351 movq(Address(rsp, st_off), 1);
6352 } else {
6353 movq(toReg->as_Register(), 1);
6354 }
6355 continue;
6356 }
6357 assert(off > 0, "offset in object should be positive");
6358 Address fromAddr = Address(fromReg, off);
6359 if (!toReg->is_XMMRegister()) {
6360 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
6361 if (is_reference_type(bt)) {
6362 load_heap_oop(dst, fromAddr);
6363 } else {
6364 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
6365 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
6366 }
6367 if (toReg->is_stack()) {
6368 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6369 movq(Address(rsp, st_off), dst);
6370 }
6371 } else if (bt == T_DOUBLE) {
6372 movdbl(toReg->as_XMMRegister(), fromAddr);
6373 } else {
6374 assert(bt == T_FLOAT, "must be float");
6375 movflt(toReg->as_XMMRegister(), fromAddr);
6376 }
6377 }
6378 if (progress && null_check) {
6379 if (done) {
6380 jmp(L_notNull);
6381 bind(L_null);
6382 // Set null marker to zero to signal that the argument is null.
6383 // Also set all fields to zero since the runtime requires a canonical
6384 // representation of a flat null.
6385 stream.reset(sig_index, to_index);
6386 while (stream.next(toReg, bt)) {
6387 if (toReg->is_stack()) {
6388 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6389 movq(Address(rsp, st_off), 0);
6390 } else if (toReg->is_XMMRegister()) {
6391 xorps(toReg->as_XMMRegister(), toReg->as_XMMRegister());
6392 } else {
6393 xorl(toReg->as_Register(), toReg->as_Register());
6394 }
6395 }
6396 bind(L_notNull);
6397 } else {
6398 bind(L_null);
6399 }
6400 }
6401
6402 sig_index = stream.sig_index();
6403 to_index = stream.regs_index();
6404
6405 if (mark_done && reg_state[from->value()] != reg_written) {
6406 // This is okay because no one else will write to that slot
6407 reg_state[from->value()] = reg_writable;
6408 }
6409 from_index--;
6410 assert(progress || (start_offset == offset()), "should not emit code");
6411 return done;
6412 }
6413
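// Pack the scalarized fields of an inline type argument back into a buffered value object: the
// buffer oop is loaded from val_array at index vtarg_index, its fields are filled from the
// scalarized source registers/stack slots (a zero null marker short-circuits to a null oop),
// and the resulting oop is moved to 'to'.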
6414 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
6415 VMRegPair* from, int from_count, int& from_index, VMReg to,
6416 RegState reg_state[], Register val_array) {
6417 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
6418 assert(to->is_valid(), "destination must be valid");
6419
6420 if (reg_state[to->value()] == reg_written) {
6421 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6422 return true; // Already written
6423 }
6424
6425 // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
6426 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
6427 Register val_obj_tmp = r11;
6428 Register from_reg_tmp = r14;
6429 Register tmp1 = r10;
6430 Register tmp2 = r13;
6431 Register tmp3 = rbx;
6432 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
6433
6434 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
6435
6436 if (reg_state[to->value()] == reg_readonly) {
6437 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
6438 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6439 return false; // Not yet writable
6440 }
6441 val_obj = val_obj_tmp;
6442 }
6443
6444 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
6445 load_heap_oop(val_obj, Address(val_array, index));
6446
6447 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
6448 VMReg fromReg;
6449 BasicType bt;
6450 Label L_null;
6451 while (stream.next(fromReg, bt)) {
6452 assert(fromReg->is_valid(), "source must be valid");
6453 reg_state[fromReg->value()] = reg_writable;
6454
6455 int off = sig->at(stream.sig_index())._offset;
6456 if (off == -1) {
6457 // Nullable inline type argument, emit null check
6458 Label L_notNull;
6459 if (fromReg->is_stack()) {
6460 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6461 testb(Address(rsp, ld_off), 1);
6462 } else {
6463 testb(fromReg->as_Register(), 1);
6464 }
6465 jcc(Assembler::notZero, L_notNull);
6466 movptr(val_obj, 0);
6467 jmp(L_null);
6468 bind(L_notNull);
6469 continue;
6470 }
6471
6472 assert(off > 0, "offset in object should be positive");
6473 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
6474
6475 // Pack the scalarized field into the value object.
6476 Address dst(val_obj, off);
6477 if (!fromReg->is_XMMRegister()) {
6478 Register src;
6479 if (fromReg->is_stack()) {
6480 src = from_reg_tmp;
6481 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6482 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
6483 } else {
6484 src = fromReg->as_Register();
6485 }
6486 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
6487 if (is_reference_type(bt)) {
6488 // store_heap_oop transitively calls oop_store_at, which may corrupt the base register of the destination address. We need to keep val_obj valid.
6489 mov(tmp3, val_obj);
6490 Address dst_with_tmp3(tmp3, off);
6491 store_heap_oop(dst_with_tmp3, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
6492 } else {
6493 store_sized_value(dst, src, size_in_bytes);
6494 }
6495 } else if (bt == T_DOUBLE) {
6496 movdbl(dst, fromReg->as_XMMRegister());
6497 } else {
6498 assert(bt == T_FLOAT, "must be float");
6499 movflt(dst, fromReg->as_XMMRegister());
6500 }
6501 }
6502 bind(L_null);
6503 sig_index = stream.sig_index();
6504 from_index = stream.regs_index();
6505
6506 assert(reg_state[to->value()] == reg_writable, "must have already been read");
6507 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
6508 assert(success, "to register must be writable");
6509 return true;
6510 }
6511
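// Register used to temporarily hold a value whose destination is not yet writable during the
// inline type argument shuffle: xmm8 for floating point values, r14 for everything else.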
6512 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6513 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
6514 }
6515
6516 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6517 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6518 if (needs_stack_repair) {
6519 // The method has a scalarized entry point (where fields of value object arguments
6520 // are passed through registers and stack), and a non-scalarized entry point (where
6521 // value object arguments are given as oops). The non-scalarized entry point will
6522 // first load each field of value object arguments and store them in registers and on
6523 // the stack in a way compatible with the scalarized entry point. To do so, some extra
6524 // stack space might be reserved (if argument registers are not enough). On leaving the
6525 // method, this space must be freed.
6526 //
6527 // In case we used the non-scalarized entry point the stack looks like this:
6528 //
6529 // | Arguments from caller |
6530 // |---------------------------| <-- caller's SP
6531 // | Return address #1 |
6532 // |---------------------------|
6533 // | Extension space for |
6534 // | inline arg (un)packing |
6535 // |---------------------------|
6536 // | Return address #2 |
6537 // | Saved RBP |
6538 // |---------------------------| <-- start of this method's frame
6539 // | sp_inc |
6540 // | method locals |
6541 // |---------------------------| <-- SP
6542 //
6543 // There are two copies of the return address on the stack. They are identical at
6544 // first, but that can change.
6545 // If the caller has been deoptimized, the copy #1 will be patched to point at the
6546 // deopt blob, while the copy #2 will still point into the old method. In short,
6547 // the copy #2 is not reliable and should not be used. It is mostly there to
6548 // add space between the extension space and the locals, just as there would be
6549 // between the real arguments and the locals if no unpacking were needed (i.e. when
6550 // entering through the scalarized entry point).
6551 //
6552 // When leaving, one must use the copy #1 of the return address, while keeping in mind
6553 // that from the scalarized entry point there is only one copy. Indeed, if the
6554 // scalarized calling convention was used, the stack looks like this:
6555 //
6556 // | Arguments from caller |
6557 // |---------------------------| <-- caller's SP
6558 // | Return address |
6559 // | Saved RBP |
6560 // |---------------------------| <-- start of this method's frame
6561 // | sp_inc |
6562 // | method locals |
6563 // |---------------------------| <-- SP
6564 //
6565 // The sp_inc stack slot holds the total size of the frame, including the extension
6566 // space, the possible copy #2 of the return address, and the saved RBP (but never the
6567 // copy #1 of the return address). That is how the copy #1 of the return address is found.
6568 // This size is expressed in bytes. Be careful when using it from C++ in pointer arithmetic;
6569 // you might need to divide it by wordSize.
6570 //
6571 // One can find sp_inc since the start of the method's frame is at SP + initial_framesize.
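// For example (with wordSize == 8): the saved RBP is reloaded from [SP + initial_framesize],
// sp_inc from the slot just below it at [SP + initial_framesize - 8], and adding sp_inc to SP
// leaves SP pointing at the copy #1 of the return address.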
6572
6573 movq(rbp, Address(rsp, initial_framesize));
6574 // The stack increment resides just below the saved rbp
6575 addq(rsp, Address(rsp, initial_framesize - wordSize));
6576 } else {
6577 if (initial_framesize > 0) {
6578 addq(rsp, initial_framesize);
6579 }
6580 pop(rbp);
6581 }
6582 }
6583
6584 #if COMPILER2_OR_JVMCI
6585
6586 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
6587 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
6588 // cnt - number of qwords (8-byte words).
6589 // base - start address, qword aligned.
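// The qword pattern in 'val' is broadcast into xtmp; the main loop stores 64 bytes per iteration
// and the tail is handled with masked AVX-512 stores or with 32/16/8-byte stores.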
6590 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
6591 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
6592 if (use64byteVector) {
6593 evpbroadcastq(xtmp, val, AVX_512bit);
6594 } else if (MaxVectorSize >= 32) {
6595 movdq(xtmp, val);
6596 punpcklqdq(xtmp, xtmp);
6597 vinserti128_high(xtmp, xtmp);
6598 } else {
6599 movdq(xtmp, val);
6600 punpcklqdq(xtmp, xtmp);
6601 }
6602 jmp(L_zero_64_bytes);
6603
6604 BIND(L_loop);
6605 if (MaxVectorSize >= 32) {
6606 fill64(base, 0, xtmp, use64byteVector);
6607 } else {
6608 movdqu(Address(base, 0), xtmp);
6609 movdqu(Address(base, 16), xtmp);
6610 movdqu(Address(base, 32), xtmp);
6611 movdqu(Address(base, 48), xtmp);
6612 }
6613 addptr(base, 64);
6614
6615 BIND(L_zero_64_bytes);
6616 subptr(cnt, 8);
6617 jccb(Assembler::greaterEqual, L_loop);
6618
6619 // Clear the trailing bytes (fewer than 64)
6620 if (use64byteVector) {
6621 addptr(cnt, 8);
6622 jccb(Assembler::equal, L_end);
6623 fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
6624 jmp(L_end);
6625 } else {
6626 addptr(cnt, 4);
6627 jccb(Assembler::less, L_tail);
6628 if (MaxVectorSize >= 32) {
6629 vmovdqu(Address(base, 0), xtmp);
6630 } else {
6631 movdqu(Address(base, 0), xtmp);
6632 movdqu(Address(base, 16), xtmp);
6633 }
6634 }
6635 addptr(base, 32);
6636 subptr(cnt, 4);
6637
6638 BIND(L_tail);
6639 addptr(cnt, 4);
6640 jccb(Assembler::lessEqual, L_end);
6641 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
6642 fill32_masked(3, base, 0, xtmp, mask, cnt, val);
6643 } else {
6644 decrement(cnt);
6645
6646 BIND(L_sloop);
6647 movq(Address(base, 0), xtmp);
6648 addptr(base, 8);
6649 decrement(cnt);
6650 jccb(Assembler::greaterEqual, L_sloop);
6651 }
6652 BIND(L_end);
6653 }
6654
6655 // Clearing constant sized memory using YMM/ZMM registers.
6656 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
6657 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
6658 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
6659
6660 int vector64_count = (cnt & (~0x7)) >> 3;
6661 cnt = cnt & 0x7;
6662 const int fill64_per_loop = 4;
6663 const int max_unrolled_fill64 = 8;
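// For example, cnt == 19 qwords gives vector64_count == 2 (two full 64-byte stores below) and a
// 3-qword tail that is cleared by the masked store in the switch at the end.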
6664
6665 // 64 byte initialization loop.
6666 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
6667 int start64 = 0;
6668 if (vector64_count > max_unrolled_fill64) {
6669 Label LOOP;
6670 Register index = rtmp;
6671
6672 start64 = vector64_count - (vector64_count % fill64_per_loop);
6673
6674 movl(index, 0);
6675 BIND(LOOP);
6676 for (int i = 0; i < fill64_per_loop; i++) {
6677 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
6678 }
6679 addl(index, fill64_per_loop * 64);
6680 cmpl(index, start64 * 64);
6681 jccb(Assembler::less, LOOP);
6682 }
6683 for (int i = start64; i < vector64_count; i++) {
6684 fill64(base, i * 64, xtmp, use64byteVector);
6685 }
6686
6687 // Clear remaining 64 byte tail.
6688 int disp = vector64_count * 64;
6689 if (cnt) {
6690 switch (cnt) {
6691 case 1:
6692 movq(Address(base, disp), xtmp);
6693 break;
6694 case 2:
6695 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
6696 break;
6697 case 3:
6698 movl(rtmp, 0x7);
6699 kmovwl(mask, rtmp);
6700 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
6701 break;
6702 case 4:
6703 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6704 break;
6705 case 5:
6706 if (use64byteVector) {
6707 movl(rtmp, 0x1F);
6708 kmovwl(mask, rtmp);
6709 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6710 } else {
6711 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6712 movq(Address(base, disp + 32), xtmp);
6713 }
6714 break;
6715 case 6:
6716 if (use64byteVector) {
6717 movl(rtmp, 0x3F);
6718 kmovwl(mask, rtmp);
6719 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6720 } else {
6721 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6722 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
6723 }
6724 break;
6725 case 7:
6726 if (use64byteVector) {
6727 movl(rtmp, 0x7F);
6728 kmovwl(mask, rtmp);
6729 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6730 } else {
6731 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6732 movl(rtmp, 0x7);
6733 kmovwl(mask, rtmp);
6734 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
6735 }
6736 break;
6737 default:
6738 fatal("Unexpected length: %d\n", cnt);
6739 break;
6740 }
6741 }
6742 }
6743
6744 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
6745 bool is_large, bool word_copy_only, KRegister mask) {
6746 // cnt - number of qwords (8-byte words).
6747 // base - start address, qword aligned.
6748 // is_large - if optimizers know cnt is larger than InitArrayShortSize
6749 assert(base==rdi, "base register must be edi for rep stos");
6750 assert(val==rax, "val register must be eax for rep stos");
6751 assert(cnt==rcx, "cnt register must be ecx for rep stos");
6752 assert(InitArrayShortSize % BytesPerLong == 0,
6753 "InitArrayShortSize should be the multiple of BytesPerLong");
6754
6755 Label DONE;
6756
6757 if (!is_large) {
6758 Label LOOP, LONG;
6759 cmpptr(cnt, InitArrayShortSize/BytesPerLong);
6760 jccb(Assembler::greater, LONG);
6761
6762 decrement(cnt);
6763 jccb(Assembler::negative, DONE); // Zero length
6764
6765 // Use individual pointer-sized stores for small counts:
6766 BIND(LOOP);
6767 movptr(Address(base, cnt, Address::times_ptr), val);
6768 decrement(cnt);
6769 jccb(Assembler::greaterEqual, LOOP);
6770 jmpb(DONE);
6771
6772 BIND(LONG);
6773 }
6774
6775 // Use longer rep-prefixed ops for non-small counts:
6776 if (UseFastStosb && !word_copy_only) {
6777 shlptr(cnt, 3); // convert to number of bytes
6778 rep_stosb();
6779 } else if (UseXMMForObjInit) {
6780 xmm_clear_mem(base, cnt, val, xtmp, mask);
6781 } else {
6782 rep_stos();
6783 }
6784
6785 BIND(DONE);
6786 }
6787
6788 #endif //COMPILER2_OR_JVMCI
6789
6790
6791 void MacroAssembler::generate_fill(BasicType t, bool aligned,
6792 Register to, Register value, Register count,
6793 Register rtmp, XMMRegister xtmp) {
6794 ShortBranchVerifier sbv(this);
6795 assert_different_registers(to, value, count, rtmp);
6796 Label L_exit;
6797 Label L_fill_2_bytes, L_fill_4_bytes;
6798
6799 #if defined(COMPILER2)
6800 if(MaxVectorSize >=32 &&
6801 VM_Version::supports_avx512vlbw() &&
6802 VM_Version::supports_bmi2()) {
6803 generate_fill_avx3(t, to, value, count, rtmp, xtmp);
6804 return;
6805 }
6806 #endif
6807
6808 int shift = -1;
6809 switch (t) {
6810 case T_BYTE:
6811 shift = 2;
6812 break;
6813 case T_SHORT:
6814 shift = 1;
6815 break;
6816 case T_INT:
6817 shift = 0;
6818 break;
6819 default: ShouldNotReachHere();
6820 }
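// 'shift' is the log2 of the number of elements per 32-bit word, so expressions like
// (8 << shift) below express a byte size (here 32 bytes) as an element count.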
6821
6822 if (t == T_BYTE) {
6823 andl(value, 0xff);
6824 movl(rtmp, value);
6825 shll(rtmp, 8);
6826 orl(value, rtmp);
6827 }
6828 if (t == T_SHORT) {
6829 andl(value, 0xffff);
6830 }
6831 if (t == T_BYTE || t == T_SHORT) {
6832 movl(rtmp, value);
6833 shll(rtmp, 16);
6834 orl(value, rtmp);
6835 }
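// 'value' now holds the fill pattern replicated across all 32 bits.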
6836
6837 cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
6838 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
6839 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
6840 Label L_skip_align2;
6841 // align the destination address to a 4-byte boundary
6842 if (t == T_BYTE) {
6843 Label L_skip_align1;
6844 // One byte misalignment happens only for byte arrays
6845 testptr(to, 1);
6846 jccb(Assembler::zero, L_skip_align1);
6847 movb(Address(to, 0), value);
6848 increment(to);
6849 decrement(count);
6850 BIND(L_skip_align1);
6851 }
6852 // Two bytes misalignment happens only for byte and short (char) arrays
6853 testptr(to, 2);
6854 jccb(Assembler::zero, L_skip_align2);
6855 movw(Address(to, 0), value);
6856 addptr(to, 2);
6857 subptr(count, 1<<(shift-1));
6858 BIND(L_skip_align2);
6859 }
6860 {
6861 Label L_fill_32_bytes;
6862 if (!UseUnalignedLoadStores) {
6863 // align to 8 bytes, we know we are 4 byte aligned to start
6864 testptr(to, 4);
6865 jccb(Assembler::zero, L_fill_32_bytes);
6866 movl(Address(to, 0), value);
6867 addptr(to, 4);
6868 subptr(count, 1<<shift);
6869 }
6870 BIND(L_fill_32_bytes);
6871 {
6872 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
6873 movdl(xtmp, value);
6874 if (UseAVX >= 2 && UseUnalignedLoadStores) {
6875 Label L_check_fill_32_bytes;
6876 if (UseAVX > 2) {
6877 // Fill 64-byte chunks
6878 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
6879
6880 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
6881 cmpptr(count, VM_Version::avx3_threshold());
6882 jccb(Assembler::below, L_check_fill_64_bytes_avx2);
6883
6884 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
6885
6886 subptr(count, 16 << shift);
6887 jcc(Assembler::less, L_check_fill_32_bytes);
6888 align(16);
6889
6890 BIND(L_fill_64_bytes_loop_avx3);
6891 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
6892 addptr(to, 64);
6893 subptr(count, 16 << shift);
6894 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
6895 jmpb(L_check_fill_32_bytes);
6896
6897 BIND(L_check_fill_64_bytes_avx2);
6898 }
6899 // Fill 64-byte chunks
6900 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
6901
6902 subptr(count, 16 << shift);
6903 jcc(Assembler::less, L_check_fill_32_bytes);
6904
6905 // align data for 64-byte chunks
6906 Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
6907 if (EnableX86ECoreOpts) {
6908 // align 'big' arrays to cache lines to minimize split_stores
6909 cmpptr(count, 96 << shift);
6910 jcc(Assembler::below, L_fill_64_bytes_loop);
6911
6912 // Find the bytes needed for alignment
6913 movptr(rtmp, to);
6914 andptr(rtmp, 0x1c);
6915 jcc(Assembler::zero, L_fill_64_bytes_loop);
6916 negptr(rtmp); // together with the addptr below: rtmp = 32 - misalignment, the number of bytes to fill to reach 32-byte alignment
6917 addptr(rtmp, 32);
6918 shrptr(rtmp, 2 - shift); // convert the byte count to an element count
6919 subptr(count, rtmp); // adjust count by the number of alignment elements
6920
6921 align(16);
6922 BIND(L_align_64_bytes_loop);
6923 movdl(Address(to, 0), xtmp);
6924 addptr(to, 4);
6925 subptr(rtmp, 1 << shift);
6926 jcc(Assembler::greater, L_align_64_bytes_loop);
6927 }
6928
6929 align(16);
6930 BIND(L_fill_64_bytes_loop);
6931 vmovdqu(Address(to, 0), xtmp);
6932 vmovdqu(Address(to, 32), xtmp);
6933 addptr(to, 64);
6934 subptr(count, 16 << shift);
6935 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
6936
6937 align(16);
6938 BIND(L_check_fill_32_bytes);
6939 addptr(count, 8 << shift);
6940 jccb(Assembler::less, L_check_fill_8_bytes);
6941 vmovdqu(Address(to, 0), xtmp);
6942 addptr(to, 32);
6943 subptr(count, 8 << shift);
6944
6945 BIND(L_check_fill_8_bytes);
6946 // clean upper bits of YMM registers
6947 movdl(xtmp, value);
6948 pshufd(xtmp, xtmp, 0);
6949 } else {
6950 // Fill 32-byte chunks
6951 pshufd(xtmp, xtmp, 0);
6952
6953 subptr(count, 8 << shift);
6954 jcc(Assembler::less, L_check_fill_8_bytes);
6955 align(16);
6956
6957 BIND(L_fill_32_bytes_loop);
6958
6959 if (UseUnalignedLoadStores) {
6960 movdqu(Address(to, 0), xtmp);
6961 movdqu(Address(to, 16), xtmp);
6962 } else {
6963 movq(Address(to, 0), xtmp);
6964 movq(Address(to, 8), xtmp);
6965 movq(Address(to, 16), xtmp);
6966 movq(Address(to, 24), xtmp);
6967 }
6968
6969 addptr(to, 32);
6970 subptr(count, 8 << shift);
6971 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
6972
6973 BIND(L_check_fill_8_bytes);
6974 }
6975 addptr(count, 8 << shift);
6976 jccb(Assembler::zero, L_exit);
6977 jmpb(L_fill_8_bytes);
6978
6979 //
6980 // length is too short, just fill qwords
6981 //
6982 align(16);
6983 BIND(L_fill_8_bytes_loop);
6984 movq(Address(to, 0), xtmp);
6985 addptr(to, 8);
6986 BIND(L_fill_8_bytes);
6987 subptr(count, 1 << (shift + 1));
6988 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
6989 }
6990 }
6991
6992 Label L_fill_4_bytes_loop;
6993 testl(count, 1 << shift);
6994 jccb(Assembler::zero, L_fill_2_bytes);
6995
6996 align(16);
6997 BIND(L_fill_4_bytes_loop);
6998 movl(Address(to, 0), value);
6999 addptr(to, 4);
7000
7001 BIND(L_fill_4_bytes);
7002 subptr(count, 1 << shift);
7003 jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
7004
7005 if (t == T_BYTE || t == T_SHORT) {
7006 Label L_fill_byte;
7007 BIND(L_fill_2_bytes);
7008 // fill trailing 2 bytes
7009 testl(count, 1<<(shift-1));
7010 jccb(Assembler::zero, L_fill_byte);
7011 movw(Address(to, 0), value);
7012 if (t == T_BYTE) {
7013 addptr(to, 2);
7014 BIND(L_fill_byte);
7015 // fill trailing byte
7016 testl(count, 1);
7017 jccb(Assembler::zero, L_exit);
7018 movb(Address(to, 0), value);
7019 } else {
7020 BIND(L_fill_byte);
7021 }
7022 } else {
7023 BIND(L_fill_2_bytes);
7024 }
7025 BIND(L_exit);
7026 }
7027
7028 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
7029 switch(type) {
7030 case T_BYTE:
7031 case T_BOOLEAN:
7032 evpbroadcastb(dst, src, vector_len);
7033 break;
7034 case T_SHORT:
7035 case T_CHAR:
7036 evpbroadcastw(dst, src, vector_len);
7037 break;
7038 case T_INT:
7039 case T_FLOAT:
7040 evpbroadcastd(dst, src, vector_len);
7041 break;
7042 case T_LONG:
7043 case T_DOUBLE:
7044 evpbroadcastq(dst, src, vector_len);
7045 break;
7046 default:
7047 fatal("Unhandled type : %s", type2name(type));
7048 break;
7049 }
7050 }
7051
7052 // encode char[] to byte[] in ISO_8859_1 or ASCII
7053 //@IntrinsicCandidate
7054 //private static int implEncodeISOArray(byte[] sa, int sp,
7055 //byte[] da, int dp, int len) {
7056 // int i = 0;
7057 // for (; i < len; i++) {
7058 // char c = StringUTF16.getChar(sa, sp++);
7059 // if (c > '\u00FF')
7060 // break;
7061 // da[dp++] = (byte)c;
7062 // }
7063 // return i;
7064 //}
7065 //
7066 //@IntrinsicCandidate
7067 //private static int implEncodeAsciiArray(char[] sa, int sp,
7068 // byte[] da, int dp, int len) {
7069 // int i = 0;
7070 // for (; i < len; i++) {
7071 // char c = sa[sp++];
7072 // if (c >= '\u0080')
7073 // break;
7074 // da[dp++] = (byte)c;
7075 // }
7076 // return i;
7077 //}
7078 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7079 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7080 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7081 Register tmp5, Register result, bool ascii) {
7082
7083 // rsi: src
7084 // rdi: dst
7085 // rdx: len
7086 // rcx: tmp5
7087 // rax: result
7088 ShortBranchVerifier sbv(this);
7089 assert_different_registers(src, dst, len, tmp5, result);
7090 Label L_done, L_copy_1_char, L_copy_1_char_exit;
7091
7092 int mask = ascii ? 0xff80ff80 : 0xff00ff00;
7093 int short_mask = ascii ? 0xff80 : 0xff00;
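// 'mask' holds, for each 16-bit char, the bits that must be zero for the char to be encodable:
// 0xff80 for ASCII (c < 0x80) and 0xff00 for ISO-8859-1 (c <= 0xff).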
7094
7095 // set result
7096 xorl(result, result);
7097 // check for zero length
7098 testl(len, len);
7099 jcc(Assembler::zero, L_done);
7100
7101 movl(result, len);
7102
7103 // Setup pointers
7104 lea(src, Address(src, len, Address::times_2)); // char[]
7105 lea(dst, Address(dst, len, Address::times_1)); // byte[]
7106 negptr(len);
7107
7108 if (UseSSE42Intrinsics || UseAVX >= 2) {
7109 Label L_copy_8_chars, L_copy_8_chars_exit;
7110 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7111
7112 if (UseAVX >= 2) {
7113 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7114 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
7115 movdl(tmp1Reg, tmp5);
7116 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
7117 jmp(L_chars_32_check);
7118
7119 bind(L_copy_32_chars);
7120 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7121 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7122 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7123 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
7124 jccb(Assembler::notZero, L_copy_32_chars_exit);
7125 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7126 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
7127 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7128
7129 bind(L_chars_32_check);
7130 addptr(len, 32);
7131 jcc(Assembler::lessEqual, L_copy_32_chars);
7132
7133 bind(L_copy_32_chars_exit);
7134 subptr(len, 16);
7135 jccb(Assembler::greater, L_copy_16_chars_exit);
7136
7137 } else if (UseSSE42Intrinsics) {
7138 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
7139 movdl(tmp1Reg, tmp5);
7140 pshufd(tmp1Reg, tmp1Reg, 0);
7141 jmpb(L_chars_16_check);
7142 }
7143
7144 bind(L_copy_16_chars);
7145 if (UseAVX >= 2) {
7146 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7147 vptest(tmp2Reg, tmp1Reg);
7148 jcc(Assembler::notZero, L_copy_16_chars_exit);
7149 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
7150 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
7151 } else {
7152 if (UseAVX > 0) {
7153 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7154 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7155 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
7156 } else {
7157 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7158 por(tmp2Reg, tmp3Reg);
7159 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7160 por(tmp2Reg, tmp4Reg);
7161 }
7162 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
7163 jccb(Assembler::notZero, L_copy_16_chars_exit);
7164 packuswb(tmp3Reg, tmp4Reg);
7165 }
7166 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7167
7168 bind(L_chars_16_check);
7169 addptr(len, 16);
7170 jcc(Assembler::lessEqual, L_copy_16_chars);
7171
7172 bind(L_copy_16_chars_exit);
7173 if (UseAVX >= 2) {
7174 // clean upper bits of YMM registers
7175 vpxor(tmp2Reg, tmp2Reg);
7176 vpxor(tmp3Reg, tmp3Reg);
7177 vpxor(tmp4Reg, tmp4Reg);
7178 movdl(tmp1Reg, tmp5);
7179 pshufd(tmp1Reg, tmp1Reg, 0);
7180 }
7181 subptr(len, 8);
7182 jccb(Assembler::greater, L_copy_8_chars_exit);
7183
7184 bind(L_copy_8_chars);
7185 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7186 ptest(tmp3Reg, tmp1Reg);
7187 jccb(Assembler::notZero, L_copy_8_chars_exit);
7188 packuswb(tmp3Reg, tmp1Reg);
7189 movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7190 addptr(len, 8);
7191 jccb(Assembler::lessEqual, L_copy_8_chars);
7192
7193 bind(L_copy_8_chars_exit);
7194 subptr(len, 8);
7195 jccb(Assembler::zero, L_done);
7196 }
7197
7198 bind(L_copy_1_char);
7199 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7200 testl(tmp5, short_mask); // check if Unicode or non-ASCII char
7201 jccb(Assembler::notZero, L_copy_1_char_exit);
7202 movb(Address(dst, len, Address::times_1, 0), tmp5);
7203 addptr(len, 1);
7204 jccb(Assembler::less, L_copy_1_char);
7205
7206 bind(L_copy_1_char_exit);
7207 addptr(result, len); // len is the negative count of unprocessed elements
7208
7209 bind(L_done);
7210 }
7211
7212 /**
7213 * Helper for multiply_to_len().
7214 */
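// In effect: dest_hi:dest_lo += src1 + src2, with src1 and src2 zero-extended to 128 bits and
// the carries propagated into dest_hi.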
7215 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7216 addq(dest_lo, src1);
7217 adcq(dest_hi, 0);
7218 addq(dest_lo, src2);
7219 adcq(dest_hi, 0);
7220 }
7221
7222 /**
7223 * Multiply 64 bit by 64 bit first loop.
7224 */
7225 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7226 Register y, Register y_idx, Register z,
7227 Register carry, Register product,
7228 Register idx, Register kdx) {
7229 //
7230 // jlong carry, x[], y[], z[];
7231 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7232 // huge_128 product = y[idx] * x[xstart] + carry;
7233 // z[kdx] = (jlong)product;
7234 // carry = (jlong)(product >>> 64);
7235 // }
7236 // z[xstart] = carry;
7237 //
7238
7239 Label L_first_loop, L_first_loop_exit;
7240 Label L_one_x, L_one_y, L_multiply;
7241
7242 decrementl(xstart);
7243 jcc(Assembler::negative, L_one_x);
7244
7245 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7246 rorq(x_xstart, 32); // convert big-endian to little-endian
7247
7248 bind(L_first_loop);
7249 decrementl(idx);
7250 jcc(Assembler::negative, L_first_loop_exit);
7251 decrementl(idx);
7252 jcc(Assembler::negative, L_one_y);
7253 movq(y_idx, Address(y, idx, Address::times_4, 0));
7254 rorq(y_idx, 32); // convert big-endian to little-endian
7255 bind(L_multiply);
7256 movq(product, x_xstart);
7257 mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7258 addq(product, carry);
7259 adcq(rdx, 0);
7260 subl(kdx, 2);
7261 movl(Address(z, kdx, Address::times_4, 4), product);
7262 shrq(product, 32);
7263 movl(Address(z, kdx, Address::times_4, 0), product);
7264 movq(carry, rdx);
7265 jmp(L_first_loop);
7266
7267 bind(L_one_y);
7268 movl(y_idx, Address(y, 0));
7269 jmp(L_multiply);
7270
7271 bind(L_one_x);
7272 movl(x_xstart, Address(x, 0));
7273 jmp(L_first_loop);
7274
7275 bind(L_first_loop_exit);
7276 }
7277
7278 /**
7279 * Multiply 64 bit by 64 bit and add 128 bit.
7280 */
7281 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7282 Register yz_idx, Register idx,
7283 Register carry, Register product, int offset) {
7284 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7285 // z[kdx] = (jlong)product;
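// The upper 64 bits of the result are left in rdx for the caller to pick up as the next carry.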
7286
7287 movq(yz_idx, Address(y, idx, Address::times_4, offset));
7288 rorq(yz_idx, 32); // convert big-endian to little-endian
7289 movq(product, x_xstart);
7290 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7291 movq(yz_idx, Address(z, idx, Address::times_4, offset));
7292 rorq(yz_idx, 32); // convert big-endian to little-endian
7293
7294 add2_with_carry(rdx, product, carry, yz_idx);
7295
7296 movl(Address(z, idx, Address::times_4, offset+4), product);
7297 shrq(product, 32);
7298 movl(Address(z, idx, Address::times_4, offset), product);
7299
7300 }
7301
7302 /**
7303 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7304 */
7305 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7306 Register yz_idx, Register idx, Register jdx,
7307 Register carry, Register product,
7308 Register carry2) {
7309 // jlong carry, x[], y[], z[];
7310 // int kdx = ystart+1;
7311 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7312 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7313 // z[kdx+idx+1] = (jlong)product;
7314 // jlong carry2 = (jlong)(product >>> 64);
7315 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7316 // z[kdx+idx] = (jlong)product;
7317 // carry = (jlong)(product >>> 64);
7318 // }
7319 // idx += 2;
7320 // if (idx > 0) {
7321 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7322 // z[kdx+idx] = (jlong)product;
7323 // carry = (jlong)(product >>> 64);
7324 // }
7325 //
7326
7327 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7328
7329 movl(jdx, idx);
7330 andl(jdx, 0xFFFFFFFC);
7331 shrl(jdx, 2);
7332
7333 bind(L_third_loop);
7334 subl(jdx, 1);
7335 jcc(Assembler::negative, L_third_loop_exit);
7336 subl(idx, 4);
7337
7338 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7339 movq(carry2, rdx);
7340
7341 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7342 movq(carry, rdx);
7343 jmp(L_third_loop);
7344
7345 bind (L_third_loop_exit);
7346
7347 andl (idx, 0x3);
7348 jcc(Assembler::zero, L_post_third_loop_done);
7349
7350 Label L_check_1;
7351 subl(idx, 2);
7352 jcc(Assembler::negative, L_check_1);
7353
7354 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7355 movq(carry, rdx);
7356
7357 bind (L_check_1);
7358 addl (idx, 0x2);
7359 andl (idx, 0x1);
7360 subl(idx, 1);
7361 jcc(Assembler::negative, L_post_third_loop_done);
7362
7363 movl(yz_idx, Address(y, idx, Address::times_4, 0));
7364 movq(product, x_xstart);
7365 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7366 movl(yz_idx, Address(z, idx, Address::times_4, 0));
7367
7368 add2_with_carry(rdx, product, yz_idx, carry);
7369
7370 movl(Address(z, idx, Address::times_4, 0), product);
7371 shrq(product, 32);
7372
7373 shlq(rdx, 32);
7374 orq(product, rdx);
7375 movq(carry, product);
7376
7377 bind(L_post_third_loop_done);
7378 }
7379
7380 /**
7381 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7382 *
7383 */
7384 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7385 Register carry, Register carry2,
7386 Register idx, Register jdx,
7387 Register yz_idx1, Register yz_idx2,
7388 Register tmp, Register tmp3, Register tmp4) {
7389 assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7390
7391 // jlong carry, x[], y[], z[];
7392 // int kdx = ystart+1;
7393 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7394 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7395 // jlong carry2 = (jlong)(tmp3 >>> 64);
7396 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
7397 // carry = (jlong)(tmp4 >>> 64);
7398 // z[kdx+idx+1] = (jlong)tmp3;
7399 // z[kdx+idx] = (jlong)tmp4;
7400 // }
7401 // idx += 2;
7402 // if (idx > 0) {
7403 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7404 // z[kdx+idx] = (jlong)yz_idx1;
7405 // carry = (jlong)(yz_idx1 >>> 64);
7406 // }
7407 //
7408
7409 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7410
7411 movl(jdx, idx);
7412 andl(jdx, 0xFFFFFFFC);
7413 shrl(jdx, 2);
7414
7415 bind(L_third_loop);
7416 subl(jdx, 1);
7417 jcc(Assembler::negative, L_third_loop_exit);
7418 subl(idx, 4);
7419
7420 movq(yz_idx1, Address(y, idx, Address::times_4, 8));
7421 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7422 movq(yz_idx2, Address(y, idx, Address::times_4, 0));
7423 rorxq(yz_idx2, yz_idx2, 32);
7424
7425 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7426 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp
7427
7428 movq(yz_idx1, Address(z, idx, Address::times_4, 8));
7429 rorxq(yz_idx1, yz_idx1, 32);
7430 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7431 rorxq(yz_idx2, yz_idx2, 32);
7432
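// With ADX, the adcx/adox pairs below maintain two independent carry chains (CF and OF), so the
// two 128-bit accumulations do not serialize on a single carry flag.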
7433 if (VM_Version::supports_adx()) {
7434 adcxq(tmp3, carry);
7435 adoxq(tmp3, yz_idx1);
7436
7437 adcxq(tmp4, tmp);
7438 adoxq(tmp4, yz_idx2);
7439
7440 movl(carry, 0); // does not affect flags
7441 adcxq(carry2, carry);
7442 adoxq(carry2, carry);
7443 } else {
7444 add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7445 add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7446 }
7447 movq(carry, carry2);
7448
7449 movl(Address(z, idx, Address::times_4, 12), tmp3);
7450 shrq(tmp3, 32);
7451 movl(Address(z, idx, Address::times_4, 8), tmp3);
7452
7453 movl(Address(z, idx, Address::times_4, 4), tmp4);
7454 shrq(tmp4, 32);
7455 movl(Address(z, idx, Address::times_4, 0), tmp4);
7456
7457 jmp(L_third_loop);
7458
7459 bind (L_third_loop_exit);
7460
7461 andl (idx, 0x3);
7462 jcc(Assembler::zero, L_post_third_loop_done);
7463
7464 Label L_check_1;
7465 subl(idx, 2);
7466 jcc(Assembler::negative, L_check_1);
7467
7468 movq(yz_idx1, Address(y, idx, Address::times_4, 0));
7469 rorxq(yz_idx1, yz_idx1, 32);
7470 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7471 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7472 rorxq(yz_idx2, yz_idx2, 32);
7473
7474 add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7475
7476 movl(Address(z, idx, Address::times_4, 4), tmp3);
7477 shrq(tmp3, 32);
7478 movl(Address(z, idx, Address::times_4, 0), tmp3);
7479 movq(carry, tmp4);
7480
7481 bind (L_check_1);
7482 addl (idx, 0x2);
7483 andl (idx, 0x1);
7484 subl(idx, 1);
7485 jcc(Assembler::negative, L_post_third_loop_done);
7486 movl(tmp4, Address(y, idx, Address::times_4, 0));
7487 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3
7488 movl(tmp4, Address(z, idx, Address::times_4, 0));
7489
7490 add2_with_carry(carry2, tmp3, tmp4, carry);
7491
7492 movl(Address(z, idx, Address::times_4, 0), tmp3);
7493 shrq(tmp3, 32);
7494
7495 shlq(carry2, 32);
7496 orq(tmp3, carry2);
7497 movq(carry, tmp3);
7498
7499 bind(L_post_third_loop_done);
7500 }
7501
7502 /**
7503 * Code for BigInteger::multiplyToLen() intrinsic.
7504 *
7505 * rdi: x
7506 * rax: xlen
7507 * rsi: y
7508 * rcx: ylen
7509 * r8: z
7510 * r11: tmp0
7511 * r12: tmp1
7512 * r13: tmp2
7513 * r14: tmp3
7514 * r15: tmp4
7515 * rbx: tmp5
7516 *
7517 */
7518 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
7519 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7520 ShortBranchVerifier sbv(this);
7521 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7522
7523 push(tmp0);
7524 push(tmp1);
7525 push(tmp2);
7526 push(tmp3);
7527 push(tmp4);
7528 push(tmp5);
7529
7530 push(xlen);
7531
7532 const Register idx = tmp1;
7533 const Register kdx = tmp2;
7534 const Register xstart = tmp3;
7535
7536 const Register y_idx = tmp4;
7537 const Register carry = tmp5;
7538 const Register product = xlen;
7539 const Register x_xstart = tmp0;
7540
7541 // First Loop.
7542 //
7543 // final static long LONG_MASK = 0xffffffffL;
7544 // int xstart = xlen - 1;
7545 // int ystart = ylen - 1;
7546 // long carry = 0;
7547 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7548 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7549 // z[kdx] = (int)product;
7550 // carry = product >>> 32;
7551 // }
7552 // z[xstart] = (int)carry;
7553 //
7554
7555 movl(idx, ylen); // idx = ylen;
7556 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
7557 xorq(carry, carry); // carry = 0;
7558
7559 Label L_done;
7560
7561 movl(xstart, xlen);
7562 decrementl(xstart);
7563 jcc(Assembler::negative, L_done);
7564
7565 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7566
7567 Label L_second_loop;
7568 testl(kdx, kdx);
7569 jcc(Assembler::zero, L_second_loop);
7570
7571 Label L_carry;
7572 subl(kdx, 1);
7573 jcc(Assembler::zero, L_carry);
7574
7575 movl(Address(z, kdx, Address::times_4, 0), carry);
7576 shrq(carry, 32);
7577 subl(kdx, 1);
7578
7579 bind(L_carry);
7580 movl(Address(z, kdx, Address::times_4, 0), carry);
7581
7582 // Second and third (nested) loops.
7583 //
7584 // for (int i = xstart-1; i >= 0; i--) { // Second loop
7585 // carry = 0;
7586 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7587 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7588 // (z[k] & LONG_MASK) + carry;
7589 // z[k] = (int)product;
7590 // carry = product >>> 32;
7591 // }
7592 // z[i] = (int)carry;
7593 // }
7594 //
7595 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7596
7597 const Register jdx = tmp1;
7598
7599 bind(L_second_loop);
7600 xorl(carry, carry); // carry = 0;
7601 movl(jdx, ylen); // j = ystart+1
7602
7603 subl(xstart, 1); // i = xstart-1;
7604 jcc(Assembler::negative, L_done);
7605
7606 push (z);
7607
7608 Label L_last_x;
7609 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7610 subl(xstart, 1); // i = xstart-1;
7611 jcc(Assembler::negative, L_last_x);
7612
7613 if (UseBMI2Instructions) {
7614 movq(rdx, Address(x, xstart, Address::times_4, 0));
7615 rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7616 } else {
7617 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7618 rorq(x_xstart, 32); // convert big-endian to little-endian
7619 }
7620
7621 Label L_third_loop_prologue;
7622 bind(L_third_loop_prologue);
7623
7624 push (x);
7625 push (xstart);
7626 push (ylen);
7627
7628
7629 if (UseBMI2Instructions) {
7630 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7631 } else { // !UseBMI2Instructions
7632 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7633 }
7634
7635 pop(ylen);
7636 pop(xlen);
7637 pop(x);
7638 pop(z);
7639
7640 movl(tmp3, xlen);
7641 addl(tmp3, 1);
7642 movl(Address(z, tmp3, Address::times_4, 0), carry);
7643 subl(tmp3, 1);
7644 jccb(Assembler::negative, L_done);
7645
7646 shrq(carry, 32);
7647 movl(Address(z, tmp3, Address::times_4, 0), carry);
7648 jmp(L_second_loop);
7649
7650 // Next infrequent code is moved outside loops.
7651 bind(L_last_x);
7652 if (UseBMI2Instructions) {
7653 movl(rdx, Address(x, 0));
7654 } else {
7655 movl(x_xstart, Address(x, 0));
7656 }
7657 jmp(L_third_loop_prologue);
7658
7659 bind(L_done);
7660
7661 pop(xlen);
7662
7663 pop(tmp5);
7664 pop(tmp4);
7665 pop(tmp3);
7666 pop(tmp2);
7667 pop(tmp1);
7668 pop(tmp0);
7669 }
7670
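// Compare 'length' elements of obja and objb; the element size in bytes is
// 1 << log2_array_indxscale (the scale is expected in rcx, see the shifts below). On the first
// mismatch, 'result' is set to the element index of the difference; if the ranges are equal,
// 'result' is set to -1.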
7671 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
7672 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
7673 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
7674 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
7675 Label VECTOR8_TAIL, VECTOR4_TAIL;
7676 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
7677 Label SAME_TILL_END, DONE;
7678 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
7679
7680 //scale is in rcx in both Win64 and Unix
7681 ShortBranchVerifier sbv(this);
7682
7683 shlq(length);
7684 xorq(result, result);
7685
7686 if ((AVX3Threshold == 0) && (UseAVX > 2) &&
7687 VM_Version::supports_avx512vlbw()) {
7688 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
7689
7690 cmpq(length, 64);
7691 jcc(Assembler::less, VECTOR32_TAIL);
7692
7693 movq(tmp1, length);
7694 andq(tmp1, 0x3F); // tail count
7695 andq(length, ~(0x3F)); //vector count
7696
7697 bind(VECTOR64_LOOP);
7698 // AVX512 code to compare 64 byte vectors.
7699 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
7700 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
7701 kortestql(k7, k7);
7702 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
7703 addq(result, 64);
7704 subq(length, 64);
7705 jccb(Assembler::notZero, VECTOR64_LOOP);
7706
7707 //bind(VECTOR64_TAIL);
7708 testq(tmp1, tmp1);
7709 jcc(Assembler::zero, SAME_TILL_END);
7710
7711 //bind(VECTOR64_TAIL);
7712 // AVX512 code to compare up to 63 byte vectors.
7713 mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
7714 shlxq(tmp2, tmp2, tmp1);
7715 notq(tmp2);
7716 kmovql(k3, tmp2);
7717
7718 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
7719 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
7720
7721 ktestql(k7, k3);
7722 jcc(Assembler::below, SAME_TILL_END); // not mismatch
7723
7724 bind(VECTOR64_NOT_EQUAL);
7725 kmovql(tmp1, k7);
7726 notq(tmp1);
7727 tzcntq(tmp1, tmp1);
7728 addq(result, tmp1);
7729 shrq(result);
7730 jmp(DONE);
7731 bind(VECTOR32_TAIL);
7732 }
7733
7734 cmpq(length, 8);
7735 jcc(Assembler::equal, VECTOR8_LOOP);
7736 jcc(Assembler::less, VECTOR4_TAIL);
7737
7738 if (UseAVX >= 2) {
7739 Label VECTOR16_TAIL, VECTOR32_LOOP;
7740
7741 cmpq(length, 16);
7742 jcc(Assembler::equal, VECTOR16_LOOP);
7743 jcc(Assembler::less, VECTOR8_LOOP);
7744
7745 cmpq(length, 32);
7746 jccb(Assembler::less, VECTOR16_TAIL);
7747
7748 subq(length, 32);
7749 bind(VECTOR32_LOOP);
7750 vmovdqu(rymm0, Address(obja, result));
7751 vmovdqu(rymm1, Address(objb, result));
7752 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
7753 vptest(rymm2, rymm2);
7754 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
7755 addq(result, 32);
7756 subq(length, 32);
7757 jcc(Assembler::greaterEqual, VECTOR32_LOOP);
7758 addq(length, 32);
7759 jcc(Assembler::equal, SAME_TILL_END);
7760 // falling through if less than 32 bytes left
7761
7762 bind(VECTOR16_TAIL);
7763 cmpq(length, 16);
7764 jccb(Assembler::less, VECTOR8_TAIL);
7765 bind(VECTOR16_LOOP);
7766 movdqu(rymm0, Address(obja, result));
7767 movdqu(rymm1, Address(objb, result));
7768 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
7769 ptest(rymm2, rymm2);
7770 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7771 addq(result, 16);
7772 subq(length, 16);
7773 jcc(Assembler::equal, SAME_TILL_END);
7774 //falling through if less than 16 bytes left
7775 } else {//regular intrinsics
7776
7777 cmpq(length, 16);
7778 jccb(Assembler::less, VECTOR8_TAIL);
7779
7780 subq(length, 16);
7781 bind(VECTOR16_LOOP);
7782 movdqu(rymm0, Address(obja, result));
7783 movdqu(rymm1, Address(objb, result));
7784 pxor(rymm0, rymm1);
7785 ptest(rymm0, rymm0);
7786 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7787 addq(result, 16);
7788 subq(length, 16);
7789 jccb(Assembler::greaterEqual, VECTOR16_LOOP);
7790 addq(length, 16);
7791 jcc(Assembler::equal, SAME_TILL_END);
7792 //falling through if less than 16 bytes left
7793 }
7794
7795 bind(VECTOR8_TAIL);
7796 cmpq(length, 8);
7797 jccb(Assembler::less, VECTOR4_TAIL);
7798 bind(VECTOR8_LOOP);
7799 movq(tmp1, Address(obja, result));
7800 movq(tmp2, Address(objb, result));
7801 xorq(tmp1, tmp2);
7802 testq(tmp1, tmp1);
7803 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
7804 addq(result, 8);
7805 subq(length, 8);
7806 jcc(Assembler::equal, SAME_TILL_END);
7807 //falling through if less than 8 bytes left
7808
7809 bind(VECTOR4_TAIL);
7810 cmpq(length, 4);
7811 jccb(Assembler::less, BYTES_TAIL);
7812 bind(VECTOR4_LOOP);
7813 movl(tmp1, Address(obja, result));
7814 xorl(tmp1, Address(objb, result));
7815 testl(tmp1, tmp1);
7816 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
7817 addq(result, 4);
7818 subq(length, 4);
7819 jcc(Assembler::equal, SAME_TILL_END);
7820 //falling through if less than 4 bytes left
7821
7822 bind(BYTES_TAIL);
7823 bind(BYTES_LOOP);
7824 load_unsigned_byte(tmp1, Address(obja, result));
7825 load_unsigned_byte(tmp2, Address(objb, result));
7826 xorl(tmp1, tmp2);
7827 testl(tmp1, tmp1);
7828 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7829 decq(length);
7830 jcc(Assembler::zero, SAME_TILL_END);
7831 incq(result);
7832 load_unsigned_byte(tmp1, Address(obja, result));
7833 load_unsigned_byte(tmp2, Address(objb, result));
7834 xorl(tmp1, tmp2);
7835 testl(tmp1, tmp1);
7836 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7837 decq(length);
7838 jcc(Assembler::zero, SAME_TILL_END);
7839 incq(result);
7840 load_unsigned_byte(tmp1, Address(obja, result));
7841 load_unsigned_byte(tmp2, Address(objb, result));
7842 xorl(tmp1, tmp2);
7843 testl(tmp1, tmp1);
7844 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7845 jmp(SAME_TILL_END);
7846
7847 if (UseAVX >= 2) {
7848 bind(VECTOR32_NOT_EQUAL);
7849 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
7850 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
7851 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
7852 vpmovmskb(tmp1, rymm0);
7853 bsfq(tmp1, tmp1);
7854 addq(result, tmp1);
7855 shrq(result);
7856 jmp(DONE);
7857 }
7858
7859 bind(VECTOR16_NOT_EQUAL);
7860 if (UseAVX >= 2) {
7861 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
7862 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
7863 pxor(rymm0, rymm2);
7864 } else {
7865 pcmpeqb(rymm2, rymm2);
7866 pxor(rymm0, rymm1);
7867 pcmpeqb(rymm0, rymm1);
7868 pxor(rymm0, rymm2);
7869 }
7870 pmovmskb(tmp1, rymm0);
7871 bsfq(tmp1, tmp1);
7872 addq(result, tmp1);
7873 shrq(result);
7874 jmpb(DONE);
7875
7876 bind(VECTOR8_NOT_EQUAL);
7877 bind(VECTOR4_NOT_EQUAL);
7878 bsfq(tmp1, tmp1);
7879 shrq(tmp1, 3);
7880 addq(result, tmp1);
7881 bind(BYTES_NOT_EQUAL);
7882 shrq(result);
7883 jmpb(DONE);
7884
7885 bind(SAME_TILL_END);
7886 mov64(result, -1);
7887
7888 bind(DONE);
7889 }
7890
7891 //Helper functions for square_to_len()
7892
7893 /**
7894 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
7895 * Preserves x and z and modifies rest of the registers.
7896 */
7897 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7898 // Perform square and right shift by 1
7899 // Handle odd xlen case first, then for even xlen do the following
7900 // jlong carry = 0;
7901 // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
7902 // huge_128 product = x[j:j+1] * x[j:j+1];
7903 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
7904 // z[i+2:i+3] = (jlong)(product >>> 1);
7905 // carry = (jlong)product;
7906 // }
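// The stored values are shifted back left by one bit later (see lshift_by_1()), once the
// off-diagonal products have been added in.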
7907
7908 xorq(tmp5, tmp5); // carry
7909 xorq(rdxReg, rdxReg);
7910 xorl(tmp1, tmp1); // index for x
7911 xorl(tmp4, tmp4); // index for z
7912
7913 Label L_first_loop, L_first_loop_exit;
7914
7915 testl(xlen, 1);
7916 jccb(Assembler::zero, L_first_loop); //jump if xlen is even
7917
7918 // Square and right shift by 1 the odd element using 32 bit multiply
7919 movl(raxReg, Address(x, tmp1, Address::times_4, 0));
7920 imulq(raxReg, raxReg);
7921 shrq(raxReg, 1);
7922 adcq(tmp5, 0);
7923 movq(Address(z, tmp4, Address::times_4, 0), raxReg);
7924 incrementl(tmp1);
7925 addl(tmp4, 2);
7926
7927 // Square and right shift by 1 the rest using 64 bit multiply
7928 bind(L_first_loop);
7929 cmpptr(tmp1, xlen);
7930 jccb(Assembler::equal, L_first_loop_exit);
7931
7932 // Square
7933 movq(raxReg, Address(x, tmp1, Address::times_4, 0));
7934 rorq(raxReg, 32); // convert big-endian to little-endian
7935 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax
7936
7937 // Right shift by 1 and save carry
7938 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
7939 rcrq(rdxReg, 1);
7940 rcrq(raxReg, 1);
7941 adcq(tmp5, 0);
7942
7943 // Store result in z
7944 movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
7945 movq(Address(z, tmp4, Address::times_4, 8), raxReg);
7946
7947 // Update indices for x and z
7948 addl(tmp1, 2);
7949 addl(tmp4, 4);
7950 jmp(L_first_loop);
7951
7952 bind(L_first_loop_exit);
7953 }
7954
7955
7956 /**
7957 * Perform the following multiply add operation using BMI2 instructions
7958 * carry:sum = sum + op1*op2 + carry
7959 * op2 should be in rdx
7960 * op2 is preserved, all other registers are modified
7961 */
7962 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
7963 // op2 is expected to be in rdx already (mulxq uses rdx implicitly and does not modify the flags)
7964 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
7965 addq(sum, carry);
7966 adcq(tmp2, 0);
7967 addq(sum, op1);
7968 adcq(tmp2, 0);
7969 movq(carry, tmp2);
7970 }
7971
7972 /**
7973 * Perform the following multiply add operation:
7974 * carry:sum = sum + op1*op2 + carry
7975 * Preserves op1, op2 and modifies rest of registers
7976 */
7977 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
7978 // rdx:rax = op1 * op2
7979 movq(raxReg, op2);
7980 mulq(op1);
7981
7982 // rdx:rax = sum + carry + rdx:rax
7983 addq(sum, carry);
7984 adcq(rdxReg, 0);
7985 addq(sum, raxReg);
7986 adcq(rdxReg, 0);
7987
7988 // carry:sum = rdx:sum
7989 movq(carry, rdxReg);
7990 }
7991
7992 /**
7993 * Add 64 bit long carry into z[] with carry propagation.
7994 * Preserves z and carry register values and modifies rest of registers.
7995 *
7996 */
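// Roughly equivalent sketch of the carry propagation emitted below; add64() is
// a hypothetical helper that adds a 64-bit value at int index i of z[] and
// returns the carry-out (not part of this file):
//
//   bool c = add64(z, zlen - 2, carry);
//   for (int i = zlen - 4; c && i >= 0; i -= 2) {
//     c = add64(z, i, 1);            // propagate the carry upward
//   }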
7997 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
7998 Label L_fourth_loop, L_fourth_loop_exit;
7999
8000 movl(tmp1, 1);
8001 subl(zlen, 2);
8002 addq(Address(z, zlen, Address::times_4, 0), carry);
8003
8004 bind(L_fourth_loop);
8005 jccb(Assembler::carryClear, L_fourth_loop_exit);
8006 subl(zlen, 2);
8007 jccb(Assembler::negative, L_fourth_loop_exit);
8008 addq(Address(z, zlen, Address::times_4, 0), tmp1);
8009 jmp(L_fourth_loop);
8010 bind(L_fourth_loop_exit);
8011 }
8012
8013 /**
8014 * Shift z[] left by 1 bit.
8015 * Preserves x, len, z and zlen registers and modifies rest of the registers.
8016 *
8017 */
8018 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
8019
8020 Label L_fifth_loop, L_fifth_loop_exit;
8021
8022 // Fifth loop
8023 // Perform primitiveLeftShift(z, zlen, 1)
8024
8025 const Register prev_carry = tmp1;
8026 const Register new_carry = tmp4;
8027 const Register value = tmp2;
8028 const Register zidx = tmp3;
8029
8030 // int zidx, carry;
8031 // long value;
8032 // carry = 0;
8033   //  for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
8034   //  (carry:value) = (z[zidx] << 1) | carry;
8035   //  z[zidx] = value;
8036 // }
8037
8038 movl(zidx, zlen);
8039 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
8040
8041 bind(L_fifth_loop);
8042 decl(zidx); // Use decl to preserve carry flag
8043 decl(zidx);
8044 jccb(Assembler::negative, L_fifth_loop_exit);
8045
8046 if (UseBMI2Instructions) {
8047 movq(value, Address(z, zidx, Address::times_4, 0));
8048 rclq(value, 1);
8049 rorxq(value, value, 32);
8050 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
8051 }
8052 else {
8053 // clear new_carry
8054 xorl(new_carry, new_carry);
8055
8056 // Shift z[i] by 1, or in previous carry and save new carry
8057 movq(value, Address(z, zidx, Address::times_4, 0));
8058 shlq(value, 1);
8059 adcl(new_carry, 0);
8060
8061 orq(value, prev_carry);
8062 rorq(value, 0x20);
8063 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
8064
8065 // Set previous carry = new carry
8066 movl(prev_carry, new_carry);
8067 }
8068 jmp(L_fifth_loop);
8069
8070 bind(L_fifth_loop_exit);
8071 }
8072
8073
8074 /**
8075 * Code for BigInteger::squareToLen() intrinsic
8076 *
8077 * rdi: x
8078 * rsi: len
8079 * r8: z
8080 * rcx: zlen
8081 * r12: tmp1
8082 * r13: tmp2
8083 * r14: tmp3
8084 * r15: tmp4
8085 * rbx: tmp5
8086 *
8087 */
8088 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8089
8090 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
8091 push(tmp1);
8092 push(tmp2);
8093 push(tmp3);
8094 push(tmp4);
8095 push(tmp5);
8096
8097 // First loop
8098 // Store the squares, right shifted one bit (i.e., divided by 2).
8099 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
8100
8101 // Add in off-diagonal sums.
8102 //
8103 // Second, third (nested) and fourth loops.
8104 // zlen +=2;
8105 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
8106 // carry = 0;
8107 // long op2 = x[xidx:xidx+1];
8108 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
8109 // k -= 2;
8110 // long op1 = x[j:j+1];
8111 // long sum = z[k:k+1];
8112 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
8113 // z[k:k+1] = sum;
8114 // }
8115 // add_one_64(z, k, carry, tmp_regs);
8116 // }
8117
8118 const Register carry = tmp5;
8119 const Register sum = tmp3;
8120 const Register op1 = tmp4;
8121 Register op2 = tmp2;
8122
8123 push(zlen);
8124 push(len);
8125 addl(zlen,2);
8126 bind(L_second_loop);
8127 xorq(carry, carry);
8128 subl(zlen, 4);
8129 subl(len, 2);
8130 push(zlen);
8131 push(len);
8132 cmpl(len, 0);
8133 jccb(Assembler::lessEqual, L_second_loop_exit);
8134
8135 // Multiply an array by one 64 bit long.
8136 if (UseBMI2Instructions) {
8137 op2 = rdxReg;
8138 movq(op2, Address(x, len, Address::times_4, 0));
8139 rorxq(op2, op2, 32);
8140 }
8141 else {
8142 movq(op2, Address(x, len, Address::times_4, 0));
8143 rorq(op2, 32);
8144 }
8145
8146 bind(L_third_loop);
8147 decrementl(len);
8148 jccb(Assembler::negative, L_third_loop_exit);
8149 decrementl(len);
8150 jccb(Assembler::negative, L_last_x);
8151
8152 movq(op1, Address(x, len, Address::times_4, 0));
8153 rorq(op1, 32);
8154
8155 bind(L_multiply);
8156 subl(zlen, 2);
8157 movq(sum, Address(z, zlen, Address::times_4, 0));
8158
8159 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
8160 if (UseBMI2Instructions) {
8161 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
8162 }
8163 else {
8164 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8165 }
8166
8167 movq(Address(z, zlen, Address::times_4, 0), sum);
8168
8169 jmp(L_third_loop);
8170 bind(L_third_loop_exit);
8171
8172 // Fourth loop
8173 // Add 64 bit long carry into z with carry propagation.
8174   // Uses the offset-adjusted zlen.
8175 add_one_64(z, zlen, carry, tmp1);
8176
8177 pop(len);
8178 pop(zlen);
8179 jmp(L_second_loop);
8180
8181 // Next infrequent code is moved outside loops.
8182 bind(L_last_x);
8183 movl(op1, Address(x, 0));
8184 jmp(L_multiply);
8185
8186 bind(L_second_loop_exit);
8187 pop(len);
8188 pop(zlen);
8189 pop(len);
8190 pop(zlen);
8191
8192 // Fifth loop
8193 // Shift z left 1 bit.
8194 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
8195
8196 // z[zlen-1] |= x[len-1] & 1;
8197 movl(tmp3, Address(x, len, Address::times_4, -4));
8198 andl(tmp3, 1);
8199 orl(Address(z, zlen, Address::times_4, -4), tmp3);
8200
8201 pop(tmp5);
8202 pop(tmp4);
8203 pop(tmp3);
8204 pop(tmp2);
8205 pop(tmp1);
8206 }
8207
8208 /**
8209 * Helper function for mul_add()
8210  * Multiply in[] by int k and add to out[] starting at offset offs, using a
8211  * 128 bit by 32 bit multiply, and return the carry in tmp5.
8212  * Only the quad-int (four-int) aligned portion of in[] is processed by this function.
8213  * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
8214  * This function preserves the out, in and k registers.
8215  * len and offset hold the current indices into "in" and "out" respectively.
8216  * tmp5 holds the carry.
8217  * All other registers are temporary and are modified.
8218 *
8219 */
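// Per iteration the loop below consumes four ints of in[] (as two 64-bit words,
// the higher-indexed pair first) and folds them into out[] with the running
// carry. Illustrative sketch, using the x[j:j+1] pair notation used elsewhere
// in this file:
//
//   len -= 4; offset -= 4;
//   carry:out[offset+2:offset+3] = out[offset+2:offset+3] + in[len+2:len+3] * k + carry;
//   carry:out[offset  :offset+1] = out[offset  :offset+1] + in[len  :len+1] * k + carry;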
8220 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
8221 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
8222 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8223
8224 Label L_first_loop, L_first_loop_exit;
8225
8226 movl(tmp1, len);
8227 shrl(tmp1, 2);
8228
8229 bind(L_first_loop);
8230 subl(tmp1, 1);
8231 jccb(Assembler::negative, L_first_loop_exit);
8232
8233 subl(len, 4);
8234 subl(offset, 4);
8235
8236 Register op2 = tmp2;
8237 const Register sum = tmp3;
8238 const Register op1 = tmp4;
8239 const Register carry = tmp5;
8240
8241 if (UseBMI2Instructions) {
8242 op2 = rdxReg;
8243 }
8244
8245 movq(op1, Address(in, len, Address::times_4, 8));
8246 rorq(op1, 32);
8247 movq(sum, Address(out, offset, Address::times_4, 8));
8248 rorq(sum, 32);
8249 if (UseBMI2Instructions) {
8250 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8251 }
8252 else {
8253 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8254 }
8255 // Store back in big endian from little endian
8256 rorq(sum, 0x20);
8257 movq(Address(out, offset, Address::times_4, 8), sum);
8258
8259 movq(op1, Address(in, len, Address::times_4, 0));
8260 rorq(op1, 32);
8261 movq(sum, Address(out, offset, Address::times_4, 0));
8262 rorq(sum, 32);
8263 if (UseBMI2Instructions) {
8264 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8265 }
8266 else {
8267 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8268 }
8269 // Store back in big endian from little endian
8270 rorq(sum, 0x20);
8271 movq(Address(out, offset, Address::times_4, 0), sum);
8272
8273 jmp(L_first_loop);
8274 bind(L_first_loop_exit);
8275 }
8276
8277 /**
8278 * Code for BigInteger::mulAdd() intrinsic
8279 *
8280 * rdi: out
8281 * rsi: in
8282 * r11: offs (out.length - offset)
8283 * rcx: len
8284 * r8: k
8285 * r12: tmp1
8286 * r13: tmp2
8287 * r14: tmp3
8288 * r15: tmp4
8289 * rbx: tmp5
8290 * Multiply the in[] by word k and add to out[], return the carry in rax
8291 */
8292 void MacroAssembler::mul_add(Register out, Register in, Register offs,
8293 Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
8294 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8295
8296 Label L_carry, L_last_in, L_done;
8297
8298 // carry = 0;
8299 // for (int j=len-1; j >= 0; j--) {
8300 // long product = (in[j] & LONG_MASK) * kLong +
8301 // (out[offs] & LONG_MASK) + carry;
8302 // out[offs--] = (int)product;
8303 // carry = product >>> 32;
8304 // }
8305 //
8306 push(tmp1);
8307 push(tmp2);
8308 push(tmp3);
8309 push(tmp4);
8310 push(tmp5);
8311
8312 Register op2 = tmp2;
8313 const Register sum = tmp3;
8314 const Register op1 = tmp4;
8315 const Register carry = tmp5;
8316
8317 if (UseBMI2Instructions) {
8318 op2 = rdxReg;
8319 movl(op2, k);
8320 }
8321 else {
8322 movl(op2, k);
8323 }
8324
8325 xorq(carry, carry);
8326
8327 //First loop
8328
8329 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
8330 //The carry is in tmp5
8331 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
8332
8333 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any
8334 decrementl(len);
8335 jccb(Assembler::negative, L_carry);
8336 decrementl(len);
8337 jccb(Assembler::negative, L_last_in);
8338
8339 movq(op1, Address(in, len, Address::times_4, 0));
8340 rorq(op1, 32);
8341
8342 subl(offs, 2);
8343 movq(sum, Address(out, offs, Address::times_4, 0));
8344 rorq(sum, 32);
8345
8346 if (UseBMI2Instructions) {
8347 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8348 }
8349 else {
8350 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8351 }
8352
8353 // Store back in big endian from little endian
8354 rorq(sum, 0x20);
8355 movq(Address(out, offs, Address::times_4, 0), sum);
8356
8357 testl(len, len);
8358 jccb(Assembler::zero, L_carry);
8359
8360 //Multiply the last in[] entry, if any
8361 bind(L_last_in);
8362 movl(op1, Address(in, 0));
8363 movl(sum, Address(out, offs, Address::times_4, -4));
8364
8365 movl(raxReg, k);
8366 mull(op1); //tmp4 * eax -> edx:eax
8367 addl(sum, carry);
8368 adcl(rdxReg, 0);
8369 addl(sum, raxReg);
8370 adcl(rdxReg, 0);
8371 movl(carry, rdxReg);
8372
8373 movl(Address(out, offs, Address::times_4, -4), sum);
8374
8375 bind(L_carry);
8376 //return tmp5/carry as carry in rax
8377 movl(rax, carry);
8378
8379 bind(L_done);
8380 pop(tmp5);
8381 pop(tmp4);
8382 pop(tmp3);
8383 pop(tmp2);
8384 pop(tmp1);
8385 }
8386
8387 /**
8388 * Emits code to update CRC-32 with a byte value according to constants in table
8389 *
8390  * @param [in,out] crc   Register containing the crc.
8391  * @param [in]     val   Register containing the byte to fold into the CRC.
8392  * @param [in]     table Register containing the table of crc constants.
8393 *
8394 * uint32_t crc;
8395 * val = crc_table[(val ^ crc) & 0xFF];
8396 * crc = val ^ (crc >> 8);
8397 *
8398 */
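// For reference, a standalone C sketch of how this byte update is used by
// kernel_crc32 below (illustrative only; `table` stands for the 256-entry
// table at StubRoutines::crc_table_addr()):
//
//   uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t len,
//                         const uint32_t* table) {
//     crc = ~crc;                                    // done once up front
//     while (len--) {
//       crc = table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
//     }
//     return ~crc;                                   // un-inverted at the end
//   }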
8399 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
8400 xorl(val, crc);
8401 andl(val, 0xFF);
8402 shrl(crc, 8); // unsigned shift
8403 xorl(crc, Address(table, val, Address::times_4, 0));
8404 }
8405
8406 /**
8407 * Fold 128-bit data chunk
8408 */
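// The fold advances the running 128-bit remainder by 128 bits of input, using
// carry-less (GF(2)) multiplies. Descriptively:
//   xcrc = CLMUL(xK[63:0],   xcrc[63:0])     // low 64-bit halves
//        ^ CLMUL(xK[127:64], xcrc[127:64])   // high 64-bit halves
//        ^ next 128-bit chunk (from buf+offset, or from xbuf in the variant below)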
8409 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
8410 if (UseAVX > 0) {
8411 vpclmulhdq(xtmp, xK, xcrc); // [123:64]
8412 vpclmulldq(xcrc, xK, xcrc); // [63:0]
8413 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
8414 pxor(xcrc, xtmp);
8415 } else {
8416 movdqa(xtmp, xcrc);
8417 pclmulhdq(xtmp, xK); // [123:64]
8418 pclmulldq(xcrc, xK); // [63:0]
8419 pxor(xcrc, xtmp);
8420 movdqu(xtmp, Address(buf, offset));
8421 pxor(xcrc, xtmp);
8422 }
8423 }
8424
8425 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
8426 if (UseAVX > 0) {
8427 vpclmulhdq(xtmp, xK, xcrc);
8428 vpclmulldq(xcrc, xK, xcrc);
8429 pxor(xcrc, xbuf);
8430 pxor(xcrc, xtmp);
8431 } else {
8432 movdqa(xtmp, xcrc);
8433 pclmulhdq(xtmp, xK);
8434 pclmulldq(xcrc, xK);
8435 pxor(xcrc, xbuf);
8436 pxor(xcrc, xtmp);
8437 }
8438 }
8439
8440 /**
8441 * 8-bit folds to compute 32-bit CRC
8442 *
8443 * uint64_t xcrc;
8444 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
8445 */
8446 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
8447 movdl(tmp, xcrc);
8448 andl(tmp, 0xFF);
8449 movdl(xtmp, Address(table, tmp, Address::times_4, 0));
8450 psrldq(xcrc, 1); // unsigned shift one byte
8451 pxor(xcrc, xtmp);
8452 }
8453
8454 /**
8455 * uint32_t crc;
8456 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
8457 */
8458 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
8459 movl(tmp, crc);
8460 andl(tmp, 0xFF);
8461 shrl(crc, 8);
8462 xorl(crc, Address(table, tmp, Address::times_4, 0));
8463 }
8464
8465 /**
8466 * @param crc register containing existing CRC (32-bit)
8467 * @param buf register pointing to input byte buffer (byte*)
8468 * @param len register containing number of bytes
8469 * @param table register that will contain address of CRC table
8470 * @param tmp scratch register
8471 */
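// Outline of the folding strategy emitted below (descriptive only):
//   1. Process bytes one at a time until buf is 16-byte aligned.
//   2. Fold 512 bits per iteration (4 parallel 128-bit streams) over the bulk
//      of the buffer.
//   3. Fold the 4 streams down to one 128-bit value, then fold any remaining
//      16-byte chunks into it.
//   4. Reduce 128 bits to 32 bits with carry-less multiplies followed by
//      eight 8-bit table folds.
//   5. Process the final (< 16 byte) tail one byte at a time.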
8472 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
8473 assert_different_registers(crc, buf, len, table, tmp, rax);
8474
8475 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8476 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8477
8478 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8479 // context for the registers used, where all instructions below are using 128-bit mode
8480 // On EVEX without VL and BW, these instructions will all be AVX.
8481 lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
8482 notl(crc); // ~crc
8483 cmpl(len, 16);
8484 jcc(Assembler::less, L_tail);
8485
8486 // Align buffer to 16 bytes
8487 movl(tmp, buf);
8488 andl(tmp, 0xF);
8489 jccb(Assembler::zero, L_aligned);
8490 subl(tmp, 16);
8491 addl(len, tmp);
8492
8493 align(4);
8494 BIND(L_align_loop);
8495 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8496 update_byte_crc32(crc, rax, table);
8497 increment(buf);
8498 incrementl(tmp);
8499 jccb(Assembler::less, L_align_loop);
8500
8501 BIND(L_aligned);
8502 movl(tmp, len); // save
8503 shrl(len, 4);
8504 jcc(Assembler::zero, L_tail_restore);
8505
8506 // Fold crc into first bytes of vector
8507 movdqa(xmm1, Address(buf, 0));
8508 movdl(rax, xmm1);
8509 xorl(crc, rax);
8510 if (VM_Version::supports_sse4_1()) {
8511 pinsrd(xmm1, crc, 0);
8512 } else {
8513 pinsrw(xmm1, crc, 0);
8514 shrl(crc, 16);
8515 pinsrw(xmm1, crc, 1);
8516 }
8517 addptr(buf, 16);
8518 subl(len, 4); // len > 0
8519 jcc(Assembler::less, L_fold_tail);
8520
8521 movdqa(xmm2, Address(buf, 0));
8522 movdqa(xmm3, Address(buf, 16));
8523 movdqa(xmm4, Address(buf, 32));
8524 addptr(buf, 48);
8525 subl(len, 3);
8526 jcc(Assembler::lessEqual, L_fold_512b);
8527
8528 // Fold total 512 bits of polynomial on each iteration,
8529 // 128 bits per each of 4 parallel streams.
8530 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
8531
8532 align32();
8533 BIND(L_fold_512b_loop);
8534 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8535 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
8536 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
8537 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
8538 addptr(buf, 64);
8539 subl(len, 4);
8540 jcc(Assembler::greater, L_fold_512b_loop);
8541
8542 // Fold 512 bits to 128 bits.
8543 BIND(L_fold_512b);
8544 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8545 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
8546 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
8547 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
8548
8549 // Fold the rest of 128 bits data chunks
8550 BIND(L_fold_tail);
8551 addl(len, 3);
8552 jccb(Assembler::lessEqual, L_fold_128b);
8553 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8554
8555 BIND(L_fold_tail_loop);
8556 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8557 addptr(buf, 16);
8558 decrementl(len);
8559 jccb(Assembler::greater, L_fold_tail_loop);
8560
8561 // Fold 128 bits in xmm1 down into 32 bits in crc register.
8562 BIND(L_fold_128b);
8563 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
8564 if (UseAVX > 0) {
8565 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8566 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
8567 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8568 } else {
8569 movdqa(xmm2, xmm0);
8570 pclmulqdq(xmm2, xmm1, 0x1);
8571 movdqa(xmm3, xmm0);
8572 pand(xmm3, xmm2);
8573 pclmulqdq(xmm0, xmm3, 0x1);
8574 }
8575 psrldq(xmm1, 8);
8576 psrldq(xmm2, 4);
8577 pxor(xmm0, xmm1);
8578 pxor(xmm0, xmm2);
8579
8580 // 8 8-bit folds to compute 32-bit CRC.
8581 for (int j = 0; j < 4; j++) {
8582 fold_8bit_crc32(xmm0, table, xmm1, rax);
8583 }
8584 movdl(crc, xmm0); // mov 32 bits to general register
8585 for (int j = 0; j < 4; j++) {
8586 fold_8bit_crc32(crc, table, rax);
8587 }
8588
8589 BIND(L_tail_restore);
8590 movl(len, tmp); // restore
8591 BIND(L_tail);
8592 andl(len, 0xf);
8593 jccb(Assembler::zero, L_exit);
8594
8595 // Fold the rest of bytes
8596 align(4);
8597 BIND(L_tail_loop);
8598 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8599 update_byte_crc32(crc, rax, table);
8600 increment(buf);
8601 decrementl(len);
8602 jccb(Assembler::greater, L_tail_loop);
8603
8604 BIND(L_exit);
8605   notl(crc); // ~crc
8606 }
8607
8608 // Helper function for AVX 512 CRC32
8609 // Fold 512-bit data chunks
8610 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
8611 Register pos, int offset) {
8612 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
8613 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
8614 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
8615 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
8616 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
8617 }
8618
8619 // Helper function for AVX 512 CRC32
8620 // Compute CRC32 for < 256B buffers
8621 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
8622 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
8623 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
8624
8625 Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
8626 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
8627 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
8628
8629 // check if there is enough buffer to be able to fold 16B at a time
8630 cmpl(len, 32);
8631 jcc(Assembler::less, L_less_than_32);
8632
8633 // if there is, load the constants
8634 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10
8635 movdl(xmm0, crc); // get the initial crc value
8636 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8637 pxor(xmm7, xmm0);
8638
8639 // update the buffer pointer
8640 addl(pos, 16);
8641   // update the counter. Subtract 32 instead of 16 to save one instruction from the loop
8642 subl(len, 32);
8643 jmp(L_16B_reduction_loop);
8644
8645 bind(L_less_than_32);
8646   // move the initial crc to the return value. This is necessary for zero-length buffers.
8647 movl(rax, crc);
8648 testl(len, len);
8649 jcc(Assembler::equal, L_cleanup);
8650
8651 movdl(xmm0, crc); //get the initial crc value
8652
8653 cmpl(len, 16);
8654 jcc(Assembler::equal, L_exact_16_left);
8655 jcc(Assembler::less, L_less_than_16_left);
8656
8657 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8658 pxor(xmm7, xmm0); //xor the initial crc value
8659 addl(pos, 16);
8660 subl(len, 16);
8661 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
8662 jmp(L_get_last_two_xmms);
8663
8664 bind(L_less_than_16_left);
8665   // use stack space to load data of less than 16 bytes; zero out the 16B in memory first.
8666 pxor(xmm1, xmm1);
8667 movptr(tmp1, rsp);
8668 movdqu(Address(tmp1, 0 * 16), xmm1);
8669
8670 cmpl(len, 4);
8671 jcc(Assembler::less, L_only_less_than_4);
8672
8673   // back up the counter value
8674 movl(tmp2, len);
8675 cmpl(len, 8);
8676 jcc(Assembler::less, L_less_than_8_left);
8677
8678 //load 8 Bytes
8679 movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
8680 movq(Address(tmp1, 0 * 16), rax);
8681 addptr(tmp1, 8);
8682 subl(len, 8);
8683 addl(pos, 8);
8684
8685 bind(L_less_than_8_left);
8686 cmpl(len, 4);
8687 jcc(Assembler::less, L_less_than_4_left);
8688
8689 //load 4 Bytes
8690 movl(rax, Address(buf, pos, Address::times_1, 0));
8691 movl(Address(tmp1, 0 * 16), rax);
8692 addptr(tmp1, 4);
8693 subl(len, 4);
8694 addl(pos, 4);
8695
8696 bind(L_less_than_4_left);
8697 cmpl(len, 2);
8698 jcc(Assembler::less, L_less_than_2_left);
8699
8700 // load 2 Bytes
8701 movw(rax, Address(buf, pos, Address::times_1, 0));
8702 movl(Address(tmp1, 0 * 16), rax);
8703 addptr(tmp1, 2);
8704 subl(len, 2);
8705 addl(pos, 2);
8706
8707 bind(L_less_than_2_left);
8708 cmpl(len, 1);
8709 jcc(Assembler::less, L_zero_left);
8710
8711 // load 1 Byte
8712 movb(rax, Address(buf, pos, Address::times_1, 0));
8713 movb(Address(tmp1, 0 * 16), rax);
8714
8715 bind(L_zero_left);
8716 movdqu(xmm7, Address(rsp, 0));
8717 pxor(xmm7, xmm0); //xor the initial crc value
8718
8719 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8720 movdqu(xmm0, Address(rax, tmp2));
8721 pshufb(xmm7, xmm0);
8722 jmp(L_128_done);
8723
8724 bind(L_exact_16_left);
8725 movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
8726 pxor(xmm7, xmm0); //xor the initial crc value
8727 jmp(L_128_done);
8728
8729 bind(L_only_less_than_4);
8730 cmpl(len, 3);
8731 jcc(Assembler::less, L_only_less_than_3);
8732
8733 // load 3 Bytes
8734 movb(rax, Address(buf, pos, Address::times_1, 0));
8735 movb(Address(tmp1, 0), rax);
8736
8737 movb(rax, Address(buf, pos, Address::times_1, 1));
8738 movb(Address(tmp1, 1), rax);
8739
8740 movb(rax, Address(buf, pos, Address::times_1, 2));
8741 movb(Address(tmp1, 2), rax);
8742
8743 movdqu(xmm7, Address(rsp, 0));
8744 pxor(xmm7, xmm0); //xor the initial crc value
8745
8746 pslldq(xmm7, 0x5);
8747 jmp(L_barrett);
8748 bind(L_only_less_than_3);
8749 cmpl(len, 2);
8750 jcc(Assembler::less, L_only_less_than_2);
8751
8752 // load 2 Bytes
8753 movb(rax, Address(buf, pos, Address::times_1, 0));
8754 movb(Address(tmp1, 0), rax);
8755
8756 movb(rax, Address(buf, pos, Address::times_1, 1));
8757 movb(Address(tmp1, 1), rax);
8758
8759 movdqu(xmm7, Address(rsp, 0));
8760 pxor(xmm7, xmm0); //xor the initial crc value
8761
8762 pslldq(xmm7, 0x6);
8763 jmp(L_barrett);
8764
8765 bind(L_only_less_than_2);
8766 //load 1 Byte
8767 movb(rax, Address(buf, pos, Address::times_1, 0));
8768 movb(Address(tmp1, 0), rax);
8769
8770 movdqu(xmm7, Address(rsp, 0));
8771 pxor(xmm7, xmm0); //xor the initial crc value
8772
8773 pslldq(xmm7, 0x7);
8774 }
8775
8776 /**
8777 * Compute CRC32 using AVX512 instructions
8778  * @param crc   register containing existing CRC (32-bit)
8779  * @param buf   register pointing to input byte buffer (byte*)
8780  * @param len   register containing number of bytes
8781  * @param table address of crc or crc32c table
8782  * @param tmp1  scratch register
8783  * @param tmp2  scratch register
8784  * @return rax  result register
8785 *
8786 * This routine is identical for crc32c with the exception of the precomputed constant
8787 * table which will be passed as the table argument. The calculation steps are
8788 * the same for both variants.
8789 */
8790 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
8791 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
8792
8793 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8794 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8795 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
8796 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
8797 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
8798
8799 const Register pos = r12;
8800 push(r12);
8801 subptr(rsp, 16 * 2 + 8);
8802
8803 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8804 // context for the registers used, where all instructions below are using 128-bit mode
8805 // On EVEX without VL and BW, these instructions will all be AVX.
8806 movl(pos, 0);
8807
8808 // check if smaller than 256B
8809 cmpl(len, 256);
8810 jcc(Assembler::less, L_less_than_256);
8811
8812 // load the initial crc value
8813 movdl(xmm10, crc);
8814
8815 // receive the initial 64B data, xor the initial crc value
8816 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
8817 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
8818 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
8819 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
8820
8821 subl(len, 256);
8822 cmpl(len, 256);
8823 jcc(Assembler::less, L_fold_128_B_loop);
8824
8825 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
8826 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
8827 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
8828 subl(len, 256);
8829
8830 bind(L_fold_256_B_loop);
8831 addl(pos, 256);
8832 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
8833 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
8834 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
8835 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
8836
8837 subl(len, 256);
8838 jcc(Assembler::greaterEqual, L_fold_256_B_loop);
8839
8840 // Fold 256 into 128
8841 addl(pos, 256);
8842 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
8843 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
8844 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
8845
8846 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
8847 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
8848 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
8849
8850 evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
8851 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
8852
8853 addl(len, 128);
8854 jmp(L_fold_128_B_register);
8855
8856   // At this point in the code there are 128 * x + y (0 <= y < 128) bytes of buffer. The
8857   // fold_128_B_loop will fold 128B at a time until 128 + y bytes of buffer remain.
8858
8859   // Fold 128B at a time. This section of the code folds 8 xmm registers in parallel.
8860 bind(L_fold_128_B_loop);
8861 addl(pos, 128);
8862 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
8863 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
8864
8865 subl(len, 128);
8866 jcc(Assembler::greaterEqual, L_fold_128_B_loop);
8867
8868 addl(pos, 128);
8869
8870 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
8871 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
8872 bind(L_fold_128_B_register);
8873 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
8874 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
8875 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
8876 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
8877 // save last that has no multiplicand
8878 vextracti64x2(xmm7, xmm4, 3);
8879
8880 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
8881 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
8882 // Needed later in reduction loop
8883 movdqu(xmm10, Address(table, 1 * 16));
8884 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
8885 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
8886
8887 // Swap 1,0,3,2 - 01 00 11 10
8888 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
8889 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
8890 vextracti128(xmm5, xmm8, 1);
8891 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
8892
8893 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
8894 // instead of a cmp instruction, we use the negative flag with the jl instruction
8895 addl(len, 128 - 16);
8896 jcc(Assembler::less, L_final_reduction_for_128);
8897
8898 bind(L_16B_reduction_loop);
8899 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8900 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8901 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8902 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
8903 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8904 addl(pos, 16);
8905 subl(len, 16);
8906 jcc(Assembler::greaterEqual, L_16B_reduction_loop);
8907
8908 bind(L_final_reduction_for_128);
8909 addl(len, 16);
8910 jcc(Assembler::equal, L_128_done);
8911
8912 bind(L_get_last_two_xmms);
8913 movdqu(xmm2, xmm7);
8914 addl(pos, len);
8915 movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
8916 subl(pos, len);
8917
8918 // get rid of the extra data that was loaded before
8919 // load the shift constant
8920 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8921 movdqu(xmm0, Address(rax, len));
8922 addl(rax, len);
8923
8924 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8925 //Change mask to 512
8926 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
8927 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
8928
8929 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
8930 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8931 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8932 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8933 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
8934
8935 bind(L_128_done);
8936 // compute crc of a 128-bit value
8937 movdqu(xmm10, Address(table, 3 * 16));
8938 movdqu(xmm0, xmm7);
8939
8940 // 64b fold
8941 vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
8942 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
8943 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8944
8945 // 32b fold
8946 movdqu(xmm0, xmm7);
8947 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
8948 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8949 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8950 jmp(L_barrett);
8951
8952 bind(L_less_than_256);
8953 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
8954
8955 //barrett reduction
8956 bind(L_barrett);
8957 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
8958 movdqu(xmm1, xmm7);
8959 movdqu(xmm2, xmm7);
8960 movdqu(xmm10, Address(table, 4 * 16));
8961
8962 pclmulqdq(xmm7, xmm10, 0x0);
8963 pxor(xmm7, xmm2);
8964 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
8965 movdqu(xmm2, xmm7);
8966 pclmulqdq(xmm7, xmm10, 0x10);
8967 pxor(xmm7, xmm2);
8968 pxor(xmm7, xmm1);
8969 pextrd(crc, xmm7, 2);
8970
8971 bind(L_cleanup);
8972 addptr(rsp, 16 * 2 + 8);
8973 pop(r12);
8974 }
8975
8976 // S. Gueron / Information Processing Letters 112 (2012) 184
8977 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
8978 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
8979 // Output: the 64-bit carry-less product of B * CONST
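// Illustrative C sketch of the lookup scheme emitted below; TABLEExt stands for
// the precomputed table at StubRoutines::crc32c_table_addr() (256 64-bit
// entries per "n"):
//
//   uint64_t clmul_by_const(uint32_t B, const uint64_t (*TABLEExt)[256], uint32_t n) {
//     uint64_t Q1 = TABLEExt[n][ B        & 0xFF];
//     uint64_t Q2 = TABLEExt[n][(B >>  8) & 0xFF];
//     uint64_t Q3 = TABLEExt[n][(B >> 16) & 0xFF];
//     uint64_t Q4 = TABLEExt[n][(B >> 24) & 0xFF];
//     return Q1 ^ (Q2 << 8) ^ (Q3 << 16) ^ (Q4 << 24);
//   }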
8980 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
8981 Register tmp1, Register tmp2, Register tmp3) {
8982 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
8983 if (n > 0) {
8984 addq(tmp3, n * 256 * 8);
8985 }
8986 // Q1 = TABLEExt[n][B & 0xFF];
8987 movl(tmp1, in);
8988 andl(tmp1, 0x000000FF);
8989 shll(tmp1, 3);
8990 addq(tmp1, tmp3);
8991 movq(tmp1, Address(tmp1, 0));
8992
8993 // Q2 = TABLEExt[n][B >> 8 & 0xFF];
8994 movl(tmp2, in);
8995 shrl(tmp2, 8);
8996 andl(tmp2, 0x000000FF);
8997 shll(tmp2, 3);
8998 addq(tmp2, tmp3);
8999 movq(tmp2, Address(tmp2, 0));
9000
9001 shlq(tmp2, 8);
9002 xorq(tmp1, tmp2);
9003
9004 // Q3 = TABLEExt[n][B >> 16 & 0xFF];
9005 movl(tmp2, in);
9006 shrl(tmp2, 16);
9007 andl(tmp2, 0x000000FF);
9008 shll(tmp2, 3);
9009 addq(tmp2, tmp3);
9010 movq(tmp2, Address(tmp2, 0));
9011
9012 shlq(tmp2, 16);
9013 xorq(tmp1, tmp2);
9014
9015 // Q4 = TABLEExt[n][B >> 24 & 0xFF];
9016 shrl(in, 24);
9017 andl(in, 0x000000FF);
9018 shll(in, 3);
9019 addq(in, tmp3);
9020 movq(in, Address(in, 0));
9021
9022 shlq(in, 24);
9023 xorq(in, tmp1);
9024 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
9025 }
9026
9027 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
9028 Register in_out,
9029 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
9030 XMMRegister w_xtmp2,
9031 Register tmp1,
9032 Register n_tmp2, Register n_tmp3) {
9033 if (is_pclmulqdq_supported) {
9034 movdl(w_xtmp1, in_out); // modified blindly
9035
9036 movl(tmp1, const_or_pre_comp_const_index);
9037 movdl(w_xtmp2, tmp1);
9038 pclmulqdq(w_xtmp1, w_xtmp2, 0);
9039
9040 movdq(in_out, w_xtmp1);
9041 } else {
9042 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
9043 }
9044 }
9045
9046 // Recombination Alternative 2: No bit-reflections
9047 // T1 = (CRC_A * U1) << 1
9048 // T2 = (CRC_B * U2) << 1
9049 // C1 = T1 >> 32
9050 // C2 = T2 >> 32
9051 // T1 = T1 & 0xFFFFFFFF
9052 // T2 = T2 & 0xFFFFFFFF
9053 // T1 = CRC32(0, T1)
9054 // T2 = CRC32(0, T2)
9055 // C1 = C1 ^ T1
9056 // C2 = C2 ^ T2
9057 // CRC = C1 ^ C2 ^ CRC_C
9058 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
9059 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9060 Register tmp1, Register tmp2,
9061 Register n_tmp3) {
9062 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
9063 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
9064 shlq(in_out, 1);
9065 movl(tmp1, in_out);
9066 shrq(in_out, 32);
9067 xorl(tmp2, tmp2);
9068 crc32(tmp2, tmp1, 4);
9069 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
9070 shlq(in1, 1);
9071 movl(tmp1, in1);
9072 shrq(in1, 32);
9073 xorl(tmp2, tmp2);
9074 crc32(tmp2, tmp1, 4);
9075 xorl(in1, tmp2);
9076 xorl(in_out, in1);
9077 xorl(in_out, in2);
9078 }
9079
9080 // Set N to predefined value
9081 // Subtract from a length of a buffer
9082 // execute in a loop:
9083 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
9084 // for i = 1 to N do
9085 // CRC_A = CRC32(CRC_A, A[i])
9086 // CRC_B = CRC32(CRC_B, B[i])
9087 // CRC_C = CRC32(CRC_C, C[i])
9088 // end for
9089 // Recombine
9090 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
9091 Register in_out1, Register in_out2, Register in_out3,
9092 Register tmp1, Register tmp2, Register tmp3,
9093 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9094 Register tmp4, Register tmp5,
9095 Register n_tmp6) {
9096 Label L_processPartitions;
9097 Label L_processPartition;
9098 Label L_exit;
9099
9100 bind(L_processPartitions);
9101 cmpl(in_out1, 3 * size);
9102 jcc(Assembler::less, L_exit);
9103 xorl(tmp1, tmp1);
9104 xorl(tmp2, tmp2);
9105 movq(tmp3, in_out2);
9106 addq(tmp3, size);
9107
9108 bind(L_processPartition);
9109 crc32(in_out3, Address(in_out2, 0), 8);
9110 crc32(tmp1, Address(in_out2, size), 8);
9111 crc32(tmp2, Address(in_out2, size * 2), 8);
9112 addq(in_out2, 8);
9113 cmpq(in_out2, tmp3);
9114 jcc(Assembler::less, L_processPartition);
9115 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
9116 w_xtmp1, w_xtmp2, w_xtmp3,
9117 tmp4, tmp5,
9118 n_tmp6);
9119 addq(in_out2, 2 * size);
9120 subl(in_out1, 3 * size);
9121 jmp(L_processPartitions);
9122
9123 bind(L_exit);
9124 }
9125
9126 // Algorithm 2: Pipelined usage of the CRC32 instruction.
9127 // Input: A buffer I of L bytes.
9128 // Output: the CRC32C value of the buffer.
9129 // Notations:
9130 // Write L = 24N + r, with N = floor (L/24).
9131 // r = L mod 24 (0 <= r < 24).
9132 // Consider I as the concatenation of A|B|C|R, where A, B and C each
9133 // consist of N quadwords, and R consists of r bytes.
9134 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
9135 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
9136 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
9137 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
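// Illustrative outline of the code emitted below (not itself emitted code):
//   for (size in {CRC32C_HIGH, CRC32C_MIDDLE, CRC32C_LOW}) {   // crc32c_proc_chunk
//     while (len >= 3 * size) {
//       crc three size-byte streams A, B, C independently, then recombine;
//       len -= 3 * size;
//     }
//   }
//   crc32 the remaining whole quadwords, then the remaining bytes one at a time.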
9138 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
9139 Register tmp1, Register tmp2, Register tmp3,
9140 Register tmp4, Register tmp5, Register tmp6,
9141 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9142 bool is_pclmulqdq_supported) {
9143 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
9144 Label L_wordByWord;
9145 Label L_byteByByteProlog;
9146 Label L_byteByByte;
9147 Label L_exit;
9148
9149   if (is_pclmulqdq_supported) {
9150 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
9151 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
9152
9153 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
9154 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
9155
9156 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
9157 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
9158 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
9159 } else {
9160 const_or_pre_comp_const_index[0] = 1;
9161 const_or_pre_comp_const_index[1] = 0;
9162
9163 const_or_pre_comp_const_index[2] = 3;
9164 const_or_pre_comp_const_index[3] = 2;
9165
9166 const_or_pre_comp_const_index[4] = 5;
9167 const_or_pre_comp_const_index[5] = 4;
9168 }
9169 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
9170 in2, in1, in_out,
9171 tmp1, tmp2, tmp3,
9172 w_xtmp1, w_xtmp2, w_xtmp3,
9173 tmp4, tmp5,
9174 tmp6);
9175 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
9176 in2, in1, in_out,
9177 tmp1, tmp2, tmp3,
9178 w_xtmp1, w_xtmp2, w_xtmp3,
9179 tmp4, tmp5,
9180 tmp6);
9181 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
9182 in2, in1, in_out,
9183 tmp1, tmp2, tmp3,
9184 w_xtmp1, w_xtmp2, w_xtmp3,
9185 tmp4, tmp5,
9186 tmp6);
9187 movl(tmp1, in2);
9188 andl(tmp1, 0x00000007);
9189 negl(tmp1);
9190 addl(tmp1, in2);
9191 addq(tmp1, in1);
9192
9193 cmpq(in1, tmp1);
9194 jccb(Assembler::greaterEqual, L_byteByByteProlog);
9195 align(16);
9196 BIND(L_wordByWord);
9197 crc32(in_out, Address(in1, 0), 8);
9198 addq(in1, 8);
9199 cmpq(in1, tmp1);
9200 jcc(Assembler::less, L_wordByWord);
9201
9202 BIND(L_byteByByteProlog);
9203 andl(in2, 0x00000007);
9204 movl(tmp2, 1);
9205
9206 cmpl(tmp2, in2);
9207 jccb(Assembler::greater, L_exit);
9208 BIND(L_byteByByte);
9209 crc32(in_out, Address(in1, 0), 1);
9210 incq(in1);
9211 incl(tmp2);
9212 cmpl(tmp2, in2);
9213 jcc(Assembler::lessEqual, L_byteByByte);
9214
9215 BIND(L_exit);
9216 }
9217 #undef BIND
9218 #undef BLOCK_COMMENT
9219
9220 // Compress char[] array to byte[].
9221 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
9222 // Return the array length if every element in array can be encoded,
9223 // otherwise, the index of first non-latin1 (> 0xff) character.
9224 // @IntrinsicCandidate
9225 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
9226 // for (int i = 0; i < len; i++) {
9227 // char c = src[srcOff];
9228 // if (c > 0xff) {
9229 // return i; // return index of non-latin1 char
9230 // }
9231 // dst[dstOff] = (byte)c;
9232 // srcOff++;
9233 // dstOff++;
9234 // }
9235 // return len;
9236 // }
9237 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
9238 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
9239 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
9240 Register tmp5, Register result, KRegister mask1, KRegister mask2) {
9241 Label copy_chars_loop, done, reset_sp, copy_tail;
9242
9243 // rsi: src
9244 // rdi: dst
9245 // rdx: len
9246 // rcx: tmp5
9247 // rax: result
9248
9249 // rsi holds start addr of source char[] to be compressed
9250 // rdi holds start addr of destination byte[]
9251 // rdx holds length
9252
9253 assert(len != result, "");
9254
9255 // save length for return
9256 movl(result, len);
9257
9258 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
9259 VM_Version::supports_avx512vlbw() &&
9260 VM_Version::supports_bmi2()) {
9261
9262 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
9263
9264 // alignment
9265 Label post_alignment;
9266
9267     // if length of the string is less than 32, handle it the old-fashioned way
9268 testl(len, -32);
9269 jcc(Assembler::zero, below_threshold);
9270
9271 // First check whether a character is compressible ( <= 0xFF).
9272 // Create mask to test for Unicode chars inside zmm vector
9273 movl(tmp5, 0x00FF);
9274 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
9275
9276 testl(len, -64);
9277 jccb(Assembler::zero, post_alignment);
9278
9279 movl(tmp5, dst);
9280 andl(tmp5, (32 - 1));
9281 negl(tmp5);
9282 andl(tmp5, (32 - 1));
9283
9284 // bail out when there is nothing to be done
9285 testl(tmp5, 0xFFFFFFFF);
9286 jccb(Assembler::zero, post_alignment);
9287
9288 // ~(~0 << len), where len is the # of remaining elements to process
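    // e.g. tmp5 == 5 (five chars needed to reach alignment) => len == 0x1F,
    // i.e. only the 5 lowest-order lanes are enabled in mask2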
9289 movl(len, 0xFFFFFFFF);
9290 shlxl(len, len, tmp5);
9291 notl(len);
9292 kmovdl(mask2, len);
9293 movl(len, result);
9294
9295 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9296 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9297 ktestd(mask1, mask2);
9298 jcc(Assembler::carryClear, copy_tail);
9299
9300 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9301
9302 addptr(src, tmp5);
9303 addptr(src, tmp5);
9304 addptr(dst, tmp5);
9305 subl(len, tmp5);
9306
9307 bind(post_alignment);
9308 // end of alignment
9309
9310 movl(tmp5, len);
9311 andl(tmp5, (32 - 1)); // tail count (in chars)
9312 andl(len, ~(32 - 1)); // vector count (in chars)
9313 jccb(Assembler::zero, copy_loop_tail);
9314
9315 lea(src, Address(src, len, Address::times_2));
9316 lea(dst, Address(dst, len, Address::times_1));
9317 negptr(len);
9318
9319 bind(copy_32_loop);
9320 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
9321 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
9322 kortestdl(mask1, mask1);
9323 jccb(Assembler::carryClear, reset_for_copy_tail);
9324
9325     // All elements in the currently processed chunk are valid candidates for
9326     // compression. Write the truncated byte elements to memory.
9327 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
9328 addptr(len, 32);
9329 jccb(Assembler::notZero, copy_32_loop);
9330
9331 bind(copy_loop_tail);
9332 // bail out when there is nothing to be done
9333 testl(tmp5, 0xFFFFFFFF);
9334 jcc(Assembler::zero, done);
9335
9336 movl(len, tmp5);
9337
9338 // ~(~0 << len), where len is the # of remaining elements to process
9339 movl(tmp5, 0xFFFFFFFF);
9340 shlxl(tmp5, tmp5, len);
9341 notl(tmp5);
9342
9343 kmovdl(mask2, tmp5);
9344
9345 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9346 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9347 ktestd(mask1, mask2);
9348 jcc(Assembler::carryClear, copy_tail);
9349
9350 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9351 jmp(done);
9352
9353 bind(reset_for_copy_tail);
9354 lea(src, Address(src, tmp5, Address::times_2));
9355 lea(dst, Address(dst, tmp5, Address::times_1));
9356 subptr(len, tmp5);
9357 jmp(copy_chars_loop);
9358
9359 bind(below_threshold);
9360 }
9361
9362 if (UseSSE42Intrinsics) {
9363 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
9364
9365 // vectored compression
9366 testl(len, 0xfffffff8);
9367 jcc(Assembler::zero, copy_tail);
9368
9369 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors
9370 movdl(tmp1Reg, tmp5);
9371 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg
9372
9373 andl(len, 0xfffffff0);
9374 jccb(Assembler::zero, copy_16);
9375
9376 // compress 16 chars per iter
9377 pxor(tmp4Reg, tmp4Reg);
9378
9379 lea(src, Address(src, len, Address::times_2));
9380 lea(dst, Address(dst, len, Address::times_1));
9381 negptr(len);
9382
9383 bind(copy_32_loop);
9384 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters
9385 por(tmp4Reg, tmp2Reg);
9386 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
9387 por(tmp4Reg, tmp3Reg);
9388 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector
9389 jccb(Assembler::notZero, reset_for_copy_tail);
9390     packuswb(tmp2Reg, tmp3Reg);           // only LATIN1 chars; compress each to 1 byte
9391 movdqu(Address(dst, len, Address::times_1), tmp2Reg);
9392 addptr(len, 16);
9393 jccb(Assembler::notZero, copy_32_loop);
9394
9395 // compress next vector of 8 chars (if any)
9396 bind(copy_16);
9397 // len = 0
9398 testl(result, 0x00000008); // check if there's a block of 8 chars to compress
9399 jccb(Assembler::zero, copy_tail_sse);
9400
9401 pxor(tmp3Reg, tmp3Reg);
9402
9403 movdqu(tmp2Reg, Address(src, 0));
9404 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
9405 jccb(Assembler::notZero, reset_for_copy_tail);
9406 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
9407 movq(Address(dst, 0), tmp2Reg);
9408 addptr(src, 16);
9409 addptr(dst, 8);
9410 jmpb(copy_tail_sse);
9411
9412 bind(reset_for_copy_tail);
9413 movl(tmp5, result);
9414 andl(tmp5, 0x0000000f);
9415 lea(src, Address(src, tmp5, Address::times_2));
9416 lea(dst, Address(dst, tmp5, Address::times_1));
9417 subptr(len, tmp5);
9418 jmpb(copy_chars_loop);
9419
9420 bind(copy_tail_sse);
9421 movl(len, result);
9422 andl(len, 0x00000007); // tail count (in chars)
9423 }
9424 // compress 1 char per iter
9425 bind(copy_tail);
9426 testl(len, len);
9427 jccb(Assembler::zero, done);
9428 lea(src, Address(src, len, Address::times_2));
9429 lea(dst, Address(dst, len, Address::times_1));
9430 negptr(len);
9431
9432 bind(copy_chars_loop);
9433 load_unsigned_short(tmp5, Address(src, len, Address::times_2));
9434 testl(tmp5, 0xff00); // check if Unicode char
9435 jccb(Assembler::notZero, reset_sp);
9436     movb(Address(dst, len, Address::times_1), tmp5);  // LATIN1 char; compress to 1 byte
9437 increment(len);
9438 jccb(Assembler::notZero, copy_chars_loop);
9439
9440 // add len then return (len will be zero if compress succeeded, otherwise negative)
9441 bind(reset_sp);
9442 addl(result, len);
9443
9444 bind(done);
9445 }
9446
9447 // Inflate byte[] array to char[].
9448 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
9449 // @IntrinsicCandidate
9450 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
9451 // for (int i = 0; i < len; i++) {
9452 // dst[dstOff++] = (char)(src[srcOff++] & 0xff);
9453 // }
9454 // }
9455 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
9456 XMMRegister tmp1, Register tmp2, KRegister mask) {
9457 Label copy_chars_loop, done, below_threshold, avx3_threshold;
9458 // rsi: src
9459 // rdi: dst
9460 // rdx: len
9461 // rcx: tmp2
9462
9463 // rsi holds start addr of source byte[] to be inflated
9464 // rdi holds start addr of destination char[]
9465 // rdx holds length
9466 assert_different_registers(src, dst, len, tmp2);
9467 movl(tmp2, len);
9468 if ((UseAVX > 2) && // AVX512
9469 VM_Version::supports_avx512vlbw() &&
9470 VM_Version::supports_bmi2()) {
9471
9472 Label copy_32_loop, copy_tail;
9473 Register tmp3_aliased = len;
9474
9475     // if length of the string is less than 16, handle it in an old-fashioned way
9476 testl(len, -16);
9477 jcc(Assembler::zero, below_threshold);
9478
9479 testl(len, -1 * AVX3Threshold);
9480 jcc(Assembler::zero, avx3_threshold);
9481
9482     // In order to use only one arithmetic operation per main-loop iteration,
9483     // we pre-calculate the tail and vector counts here
9484 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
9485 andl(len, -32); // vector count
9486 jccb(Assembler::zero, copy_tail);
9487
9488 lea(src, Address(src, len, Address::times_1));
9489 lea(dst, Address(dst, len, Address::times_2));
9490 negptr(len);
9491
9492
9493 // inflate 32 chars per iter
9494 bind(copy_32_loop);
9495 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
9496 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
9497 addptr(len, 32);
9498 jcc(Assembler::notZero, copy_32_loop);
9499
9500 bind(copy_tail);
9501 // bail out when there is nothing to be done
9502 testl(tmp2, -1); // we don't destroy the contents of tmp2 here
9503 jcc(Assembler::zero, done);
9504
9505 // ~(~0 << length), where length is the # of remaining elements to process
9506 movl(tmp3_aliased, -1);
9507 shlxl(tmp3_aliased, tmp3_aliased, tmp2);
9508 notl(tmp3_aliased);
9509 kmovdl(mask, tmp3_aliased);
9510 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
9511 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
9512
9513 jmp(done);
9514 bind(avx3_threshold);
9515 }
9516 if (UseSSE42Intrinsics) {
9517 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
9518
9519 if (UseAVX > 1) {
9520 andl(tmp2, (16 - 1));
9521 andl(len, -16);
9522 jccb(Assembler::zero, copy_new_tail);
9523 } else {
9524 andl(tmp2, 0x00000007); // tail count (in chars)
9525 andl(len, 0xfffffff8); // vector count (in chars)
9526 jccb(Assembler::zero, copy_tail);
9527 }
9528
9529 // vectored inflation
9530 lea(src, Address(src, len, Address::times_1));
9531 lea(dst, Address(dst, len, Address::times_2));
9532 negptr(len);
9533
9534 if (UseAVX > 1) {
9535 bind(copy_16_loop);
9536 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
9537 vmovdqu(Address(dst, len, Address::times_2), tmp1);
9538 addptr(len, 16);
9539 jcc(Assembler::notZero, copy_16_loop);
9540
9541 bind(below_threshold);
9542 bind(copy_new_tail);
9543 movl(len, tmp2);
9544 andl(tmp2, 0x00000007);
9545 andl(len, 0xFFFFFFF8);
9546 jccb(Assembler::zero, copy_tail);
9547
9548 pmovzxbw(tmp1, Address(src, 0));
9549 movdqu(Address(dst, 0), tmp1);
9550 addptr(src, 8);
9551 addptr(dst, 2 * 8);
9552
9553 jmp(copy_tail, true);
9554 }
9555
9556 // inflate 8 chars per iter
9557 bind(copy_8_loop);
9558 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words
9559 movdqu(Address(dst, len, Address::times_2), tmp1);
9560 addptr(len, 8);
9561 jcc(Assembler::notZero, copy_8_loop);
9562
9563 bind(copy_tail);
9564 movl(len, tmp2);
9565
9566 cmpl(len, 4);
9567 jccb(Assembler::less, copy_bytes);
9568
9569 movdl(tmp1, Address(src, 0)); // load 4 byte chars
9570 pmovzxbw(tmp1, tmp1);
9571 movq(Address(dst, 0), tmp1);
9572 subptr(len, 4);
9573 addptr(src, 4);
9574 addptr(dst, 8);
9575
9576 bind(copy_bytes);
9577 } else {
9578 bind(below_threshold);
9579 }
9580
9581 testl(len, len);
9582 jccb(Assembler::zero, done);
9583 lea(src, Address(src, len, Address::times_1));
9584 lea(dst, Address(dst, len, Address::times_2));
9585 negptr(len);
9586
9587 // inflate 1 char per iter
9588 bind(copy_chars_loop);
9589 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char
9590 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word
9591 increment(len);
9592 jcc(Assembler::notZero, copy_chars_loop);
9593
9594 bind(done);
9595 }
9596
9597 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
9598 switch(type) {
9599 case T_BYTE:
9600 case T_BOOLEAN:
9601 evmovdqub(dst, kmask, src, merge, vector_len);
9602 break;
9603 case T_CHAR:
9604 case T_SHORT:
9605 evmovdquw(dst, kmask, src, merge, vector_len);
9606 break;
9607 case T_INT:
9608 case T_FLOAT:
9609 evmovdqul(dst, kmask, src, merge, vector_len);
9610 break;
9611 case T_LONG:
9612 case T_DOUBLE:
9613 evmovdquq(dst, kmask, src, merge, vector_len);
9614 break;
9615 default:
9616 fatal("Unexpected type argument %s", type2name(type));
9617 break;
9618 }
9619 }
9620
9621
9622 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
9623 switch(type) {
9624 case T_BYTE:
9625 case T_BOOLEAN:
9626 evmovdqub(dst, kmask, src, merge, vector_len);
9627 break;
9628 case T_CHAR:
9629 case T_SHORT:
9630 evmovdquw(dst, kmask, src, merge, vector_len);
9631 break;
9632 case T_INT:
9633 case T_FLOAT:
9634 evmovdqul(dst, kmask, src, merge, vector_len);
9635 break;
9636 case T_LONG:
9637 case T_DOUBLE:
9638 evmovdquq(dst, kmask, src, merge, vector_len);
9639 break;
9640 default:
9641 fatal("Unexpected type argument %s", type2name(type));
9642 break;
9643 }
9644 }
9645
9646 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
9647 switch(type) {
9648 case T_BYTE:
9649 case T_BOOLEAN:
9650 evmovdqub(dst, kmask, src, merge, vector_len);
9651 break;
9652 case T_CHAR:
9653 case T_SHORT:
9654 evmovdquw(dst, kmask, src, merge, vector_len);
9655 break;
9656 case T_INT:
9657 case T_FLOAT:
9658 evmovdqul(dst, kmask, src, merge, vector_len);
9659 break;
9660 case T_LONG:
9661 case T_DOUBLE:
9662 evmovdquq(dst, kmask, src, merge, vector_len);
9663 break;
9664 default:
9665 fatal("Unexpected type argument %s", type2name(type));
9666 break;
9667 }
9668 }
9669
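// Logical NOT of the low `masklen` bits of a mask register. For masklen < 8 the
// knot below also flips the unused upper bits of the byte-sized mask, so they
// are cleared again by ANDing with (2^masklen - 1); as a scalar sketch:
// dst = ~src & ((1u << masklen) - 1).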
9670 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
9671 switch(masklen) {
9672 case 2:
9673 knotbl(dst, src);
9674 movl(rtmp, 3);
9675 kmovbl(ktmp, rtmp);
9676 kandbl(dst, ktmp, dst);
9677 break;
9678 case 4:
9679 knotbl(dst, src);
9680 movl(rtmp, 15);
9681 kmovbl(ktmp, rtmp);
9682 kandbl(dst, ktmp, dst);
9683 break;
9684 case 8:
9685 knotbl(dst, src);
9686 break;
9687 case 16:
9688 knotwl(dst, src);
9689 break;
9690 case 32:
9691 knotdl(dst, src);
9692 break;
9693 case 64:
9694 knotql(dst, src);
9695 break;
9696 default:
9697 fatal("Unexpected vector length %d", masklen);
9698 break;
9699 }
9700 }
9701
9702 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9703 switch(type) {
9704 case T_BOOLEAN:
9705 case T_BYTE:
9706 kandbl(dst, src1, src2);
9707 break;
9708 case T_CHAR:
9709 case T_SHORT:
9710 kandwl(dst, src1, src2);
9711 break;
9712 case T_INT:
9713 case T_FLOAT:
9714 kanddl(dst, src1, src2);
9715 break;
9716 case T_LONG:
9717 case T_DOUBLE:
9718 kandql(dst, src1, src2);
9719 break;
9720 default:
9721 fatal("Unexpected type argument %s", type2name(type));
9722 break;
9723 }
9724 }
9725
9726 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9727 switch(type) {
9728 case T_BOOLEAN:
9729 case T_BYTE:
9730 korbl(dst, src1, src2);
9731 break;
9732 case T_CHAR:
9733 case T_SHORT:
9734 korwl(dst, src1, src2);
9735 break;
9736 case T_INT:
9737 case T_FLOAT:
9738 kordl(dst, src1, src2);
9739 break;
9740 case T_LONG:
9741 case T_DOUBLE:
9742 korql(dst, src1, src2);
9743 break;
9744 default:
9745 fatal("Unexpected type argument %s", type2name(type));
9746 break;
9747 }
9748 }
9749
9750 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9751 switch(type) {
9752 case T_BOOLEAN:
9753 case T_BYTE:
9754 kxorbl(dst, src1, src2);
9755 break;
9756 case T_CHAR:
9757 case T_SHORT:
9758 kxorwl(dst, src1, src2);
9759 break;
9760 case T_INT:
9761 case T_FLOAT:
9762 kxordl(dst, src1, src2);
9763 break;
9764 case T_LONG:
9765 case T_DOUBLE:
9766 kxorql(dst, src1, src2);
9767 break;
9768 default:
9769 fatal("Unexpected type argument %s", type2name(type));
9770 break;
9771 }
9772 }
9773
9774 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9775 switch(type) {
9776 case T_BOOLEAN:
9777 case T_BYTE:
9778 evpermb(dst, mask, nds, src, merge, vector_len); break;
9779 case T_CHAR:
9780 case T_SHORT:
9781 evpermw(dst, mask, nds, src, merge, vector_len); break;
9782 case T_INT:
9783 case T_FLOAT:
9784 evpermd(dst, mask, nds, src, merge, vector_len); break;
9785 case T_LONG:
9786 case T_DOUBLE:
9787 evpermq(dst, mask, nds, src, merge, vector_len); break;
9788 default:
9789 fatal("Unexpected type argument %s", type2name(type)); break;
9790 }
9791 }
9792
9793 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9794 switch(type) {
9795 case T_BOOLEAN:
9796 case T_BYTE:
9797 evpermb(dst, mask, nds, src, merge, vector_len); break;
9798 case T_CHAR:
9799 case T_SHORT:
9800 evpermw(dst, mask, nds, src, merge, vector_len); break;
9801 case T_INT:
9802 case T_FLOAT:
9803 evpermd(dst, mask, nds, src, merge, vector_len); break;
9804 case T_LONG:
9805 case T_DOUBLE:
9806 evpermq(dst, mask, nds, src, merge, vector_len); break;
9807 default:
9808 fatal("Unexpected type argument %s", type2name(type)); break;
9809 }
9810 }
9811
9812 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9813 switch(type) {
9814 case T_BYTE:
9815 evpminub(dst, mask, nds, src, merge, vector_len); break;
9816 case T_SHORT:
9817 evpminuw(dst, mask, nds, src, merge, vector_len); break;
9818 case T_INT:
9819 evpminud(dst, mask, nds, src, merge, vector_len); break;
9820 case T_LONG:
9821 evpminuq(dst, mask, nds, src, merge, vector_len); break;
9822 default:
9823 fatal("Unexpected type argument %s", type2name(type)); break;
9824 }
9825 }
9826
9827 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9828 switch(type) {
9829 case T_BYTE:
9830 evpmaxub(dst, mask, nds, src, merge, vector_len); break;
9831 case T_SHORT:
9832 evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
9833 case T_INT:
9834 evpmaxud(dst, mask, nds, src, merge, vector_len); break;
9835 case T_LONG:
9836 evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
9837 default:
9838 fatal("Unexpected type argument %s", type2name(type)); break;
9839 }
9840 }
9841
9842 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9843 switch(type) {
9844 case T_BYTE:
9845 evpminub(dst, mask, nds, src, merge, vector_len); break;
9846 case T_SHORT:
9847 evpminuw(dst, mask, nds, src, merge, vector_len); break;
9848 case T_INT:
9849 evpminud(dst, mask, nds, src, merge, vector_len); break;
9850 case T_LONG:
9851 evpminuq(dst, mask, nds, src, merge, vector_len); break;
9852 default:
9853 fatal("Unexpected type argument %s", type2name(type)); break;
9854 }
9855 }
9856
9857 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9858 switch(type) {
9859 case T_BYTE:
9860 evpmaxub(dst, mask, nds, src, merge, vector_len); break;
9861 case T_SHORT:
9862 evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
9863 case T_INT:
9864 evpmaxud(dst, mask, nds, src, merge, vector_len); break;
9865 case T_LONG:
9866 evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
9867 default:
9868 fatal("Unexpected type argument %s", type2name(type)); break;
9869 }
9870 }
9871
9872 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9873 switch(type) {
9874 case T_BYTE:
9875 evpminsb(dst, mask, nds, src, merge, vector_len); break;
9876 case T_SHORT:
9877 evpminsw(dst, mask, nds, src, merge, vector_len); break;
9878 case T_INT:
9879 evpminsd(dst, mask, nds, src, merge, vector_len); break;
9880 case T_LONG:
9881 evpminsq(dst, mask, nds, src, merge, vector_len); break;
9882 case T_FLOAT:
9883 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9884 case T_DOUBLE:
9885 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9886 default:
9887 fatal("Unexpected type argument %s", type2name(type)); break;
9888 }
9889 }
9890
9891 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9892 switch(type) {
9893 case T_BYTE:
9894 evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
9895 case T_SHORT:
9896 evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
9897 case T_INT:
9898 evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
9899 case T_LONG:
9900 evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
9901 case T_FLOAT:
9902 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9903 case T_DOUBLE:
9904 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9905 default:
9906 fatal("Unexpected type argument %s", type2name(type)); break;
9907 }
9908 }
9909
9910 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9911 switch(type) {
9912 case T_BYTE:
9913 evpminsb(dst, mask, nds, src, merge, vector_len); break;
9914 case T_SHORT:
9915 evpminsw(dst, mask, nds, src, merge, vector_len); break;
9916 case T_INT:
9917 evpminsd(dst, mask, nds, src, merge, vector_len); break;
9918 case T_LONG:
9919 evpminsq(dst, mask, nds, src, merge, vector_len); break;
9920 case T_FLOAT:
9921 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9922 case T_DOUBLE:
9923 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9924 default:
9925 fatal("Unexpected type argument %s", type2name(type)); break;
9926 }
9927 }
9928
9929 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9930 switch(type) {
9931 case T_BYTE:
9932 evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
9933 case T_SHORT:
9934 evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
9935 case T_INT:
9936 evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
9937 case T_LONG:
9938 evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
9939 case T_FLOAT:
9940 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9941 case T_DOUBLE:
9942       evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9943 default:
9944 fatal("Unexpected type argument %s", type2name(type)); break;
9945 }
9946 }
9947
9948 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9949 switch(type) {
9950 case T_INT:
9951 evpxord(dst, mask, nds, src, merge, vector_len); break;
9952 case T_LONG:
9953 evpxorq(dst, mask, nds, src, merge, vector_len); break;
9954 default:
9955 fatal("Unexpected type argument %s", type2name(type)); break;
9956 }
9957 }
9958
9959 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9960 switch(type) {
9961 case T_INT:
9962 evpxord(dst, mask, nds, src, merge, vector_len); break;
9963 case T_LONG:
9964 evpxorq(dst, mask, nds, src, merge, vector_len); break;
9965 default:
9966 fatal("Unexpected type argument %s", type2name(type)); break;
9967 }
9968 }
9969
9970 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9971 switch(type) {
9972 case T_INT:
9973 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
9974 case T_LONG:
9975 evporq(dst, mask, nds, src, merge, vector_len); break;
9976 default:
9977 fatal("Unexpected type argument %s", type2name(type)); break;
9978 }
9979 }
9980
9981 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9982 switch(type) {
9983 case T_INT:
9984 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
9985 case T_LONG:
9986 evporq(dst, mask, nds, src, merge, vector_len); break;
9987 default:
9988 fatal("Unexpected type argument %s", type2name(type)); break;
9989 }
9990 }
9991
9992 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9993 switch(type) {
9994 case T_INT:
9995 evpandd(dst, mask, nds, src, merge, vector_len); break;
9996 case T_LONG:
9997 evpandq(dst, mask, nds, src, merge, vector_len); break;
9998 default:
9999 fatal("Unexpected type argument %s", type2name(type)); break;
10000 }
10001 }
10002
10003 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10004 switch(type) {
10005 case T_INT:
10006 evpandd(dst, mask, nds, src, merge, vector_len); break;
10007 case T_LONG:
10008 evpandq(dst, mask, nds, src, merge, vector_len); break;
10009 default:
10010 fatal("Unexpected type argument %s", type2name(type)); break;
10011 }
10012 }
10013
10014 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
10015 switch(masklen) {
10016 case 8:
10017 kortestbl(src1, src2);
10018 break;
10019 case 16:
10020 kortestwl(src1, src2);
10021 break;
10022 case 32:
10023 kortestdl(src1, src2);
10024 break;
10025 case 64:
10026 kortestql(src1, src2);
10027 break;
10028 default:
10029 fatal("Unexpected mask length %d", masklen);
10030 break;
10031 }
10032 }
10033
10034
10035 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
10036 switch(masklen) {
10037 case 8:
10038 ktestbl(src1, src2);
10039 break;
10040 case 16:
10041 ktestwl(src1, src2);
10042 break;
10043 case 32:
10044 ktestdl(src1, src2);
10045 break;
10046 case 64:
10047 ktestql(src1, src2);
10048 break;
10049 default:
10050 fatal("Unexpected mask length %d", masklen);
10051 break;
10052 }
10053 }
10054
10055 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
10056 switch(type) {
10057 case T_INT:
10058 evprold(dst, mask, src, shift, merge, vlen_enc); break;
10059 case T_LONG:
10060 evprolq(dst, mask, src, shift, merge, vlen_enc); break;
10061 default:
10062 fatal("Unexpected type argument %s", type2name(type)); break;
10064 }
10065 }
10066
10067 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
10068 switch(type) {
10069 case T_INT:
10070 evprord(dst, mask, src, shift, merge, vlen_enc); break;
10071 case T_LONG:
10072 evprorq(dst, mask, src, shift, merge, vlen_enc); break;
10073 default:
10074 fatal("Unexpected type argument %s", type2name(type)); break;
10075 }
10076 }
10077
10078 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
10079 switch(type) {
10080 case T_INT:
10081 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
10082 case T_LONG:
10083 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
10084 default:
10085 fatal("Unexpected type argument %s", type2name(type)); break;
10086 }
10087 }
10088
10089 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
10090 switch(type) {
10091 case T_INT:
10092 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
10093 case T_LONG:
10094 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
10095 default:
10096 fatal("Unexpected type argument %s", type2name(type)); break;
10097 }
10098 }
10099
10100 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10101 assert(rscratch != noreg || always_reachable(src), "missing");
10102
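  // If the literal is RIP-reachable from the generated code, address it directly;
  // otherwise materialize its address in rscratch and use a register-based address.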
10103 if (reachable(src)) {
10104 evpandq(dst, nds, as_Address(src), vector_len);
10105 } else {
10106 lea(rscratch, src);
10107 evpandq(dst, nds, Address(rscratch, 0), vector_len);
10108 }
10109 }
10110
10111 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
10112 assert(rscratch != noreg || always_reachable(src), "missing");
10113
10114 if (reachable(src)) {
10115 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
10116 } else {
10117 lea(rscratch, src);
10118 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
10119 }
10120 }
10121
10122 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10123 assert(rscratch != noreg || always_reachable(src), "missing");
10124
10125 if (reachable(src)) {
10126 evporq(dst, nds, as_Address(src), vector_len);
10127 } else {
10128 lea(rscratch, src);
10129 evporq(dst, nds, Address(rscratch, 0), vector_len);
10130 }
10131 }
10132
10133 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10134 assert(rscratch != noreg || always_reachable(src), "missing");
10135
10136 if (reachable(src)) {
10137 vpshufb(dst, nds, as_Address(src), vector_len);
10138 } else {
10139 lea(rscratch, src);
10140 vpshufb(dst, nds, Address(rscratch, 0), vector_len);
10141 }
10142 }
10143
10144 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
10145 assert(rscratch != noreg || always_reachable(src), "missing");
10146
10147 if (reachable(src)) {
10148 Assembler::vpor(dst, nds, as_Address(src), vector_len);
10149 } else {
10150 lea(rscratch, src);
10151 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
10152 }
10153 }
10154
10155 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
10156 assert(rscratch != noreg || always_reachable(src3), "missing");
10157
10158 if (reachable(src3)) {
10159 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
10160 } else {
10161 lea(rscratch, src3);
10162 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
10163 }
10164 }
10165
10166 #if COMPILER2_OR_JVMCI
10167
10168 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
10169 Register length, Register temp, int vec_enc) {
10170 // Computing mask for predicated vector store.
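  // bzhiq zeroes all bits of temp above bit index `length`, leaving exactly the low
  // `length` bits set; moved into the k-register, one mask bit per element selects the
  // lanes written by the masked evmovdqu below.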
10171 movptr(temp, -1);
10172 bzhiq(temp, temp, length);
10173 kmov(mask, temp);
10174 evmovdqu(bt, mask, dst, xmm, true, vec_enc);
10175 }
10176
10177 // Set-memory (fill) operation for lengths of at most 64 bytes.
10178 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
10179 XMMRegister xmm, KRegister mask, Register length,
10180 Register temp, bool use64byteVector) {
10181 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10182 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
10183 if (!use64byteVector) {
10184 fill32(dst, disp, xmm);
10185 subptr(length, 32 >> shift);
10186 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
10187 } else {
10188 assert(MaxVectorSize == 64, "vector length != 64");
10189 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
10190 }
10191 }
10192
10193
10194 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
10195 XMMRegister xmm, KRegister mask, Register length,
10196 Register temp) {
10197 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10198 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
10199 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
10200 }
10201
10202
10203 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
10204 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10205 vmovdqu(dst, xmm);
10206 }
10207
10208 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
10209 fill32(Address(dst, disp), xmm);
10210 }
10211
10212 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
10213 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10214 if (!use64byteVector) {
10215 fill32(dst, xmm);
10216 fill32(dst.plus_disp(32), xmm);
10217 } else {
10218 evmovdquq(dst, xmm, Assembler::AVX_512bit);
10219 }
10220 }
10221
10222 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
10223 fill64(Address(dst, disp), xmm, use64byteVector);
10224 }
10225
10226 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
10227 Register count, Register rtmp, XMMRegister xtmp) {
10228 Label L_exit;
10229 Label L_fill_start;
10230 Label L_fill_64_bytes;
10231 Label L_fill_96_bytes;
10232 Label L_fill_128_bytes;
10233 Label L_fill_128_bytes_loop;
10234 Label L_fill_128_loop_header;
10235 Label L_fill_128_bytes_loop_header;
10236 Label L_fill_128_bytes_loop_pre_header;
10237 Label L_fill_zmm_sequence;
10238
10239 int shift = -1;
10240 int avx3threshold = VM_Version::avx3_threshold();
10241 switch(type) {
10242 case T_BYTE: shift = 0;
10243 break;
10244 case T_SHORT: shift = 1;
10245 break;
10246 case T_INT: shift = 2;
10247 break;
10248 /* Uncomment when LONG fill stubs are supported.
10249 case T_LONG: shift = 3;
10250 break;
10251 */
10252 default:
10253 fatal("Unhandled type: %s\n", type2name(type));
10254 }
10255
10256 if ((avx3threshold != 0) || (MaxVectorSize == 32)) {
10257
10258 if (MaxVectorSize == 64) {
10259 cmpq(count, avx3threshold >> shift);
10260 jcc(Assembler::greater, L_fill_zmm_sequence);
10261 }
10262
10263 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
10264
10265 bind(L_fill_start);
10266
10267 cmpq(count, 32 >> shift);
10268 jccb(Assembler::greater, L_fill_64_bytes);
10269 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
10270 jmp(L_exit);
10271
10272 bind(L_fill_64_bytes);
10273 cmpq(count, 64 >> shift);
10274 jccb(Assembler::greater, L_fill_96_bytes);
10275 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
10276 jmp(L_exit);
10277
10278 bind(L_fill_96_bytes);
10279 cmpq(count, 96 >> shift);
10280 jccb(Assembler::greater, L_fill_128_bytes);
10281 fill64(to, 0, xtmp);
10282 subq(count, 64 >> shift);
10283 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
10284 jmp(L_exit);
10285
10286 bind(L_fill_128_bytes);
10287 cmpq(count, 128 >> shift);
10288 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
10289 fill64(to, 0, xtmp);
10290 fill32(to, 64, xtmp);
10291 subq(count, 96 >> shift);
10292 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
10293 jmp(L_exit);
10294
10295 bind(L_fill_128_bytes_loop_pre_header);
10296 {
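      // Align `to` to a 32-byte boundary: fill the leading (32 - misalignment) bytes with
      // a masked byte store, then advance `to` and reduce `count` (in elements) accordingly.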
10297 mov(rtmp, to);
10298 andq(rtmp, 31);
10299 jccb(Assembler::zero, L_fill_128_bytes_loop_header);
10300 negq(rtmp);
10301 addq(rtmp, 32);
10302 mov64(r8, -1L);
10303 bzhiq(r8, r8, rtmp);
10304 kmovql(k2, r8);
10305 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
10306 addq(to, rtmp);
10307 shrq(rtmp, shift);
10308 subq(count, rtmp);
10309 }
10310
10311 cmpq(count, 128 >> shift);
10312 jcc(Assembler::less, L_fill_start);
10313
10314 bind(L_fill_128_bytes_loop_header);
10315 subq(count, 128 >> shift);
10316
10317 align32();
10318 bind(L_fill_128_bytes_loop);
10319 fill64(to, 0, xtmp);
10320 fill64(to, 64, xtmp);
10321 addq(to, 128);
10322 subq(count, 128 >> shift);
10323 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
10324
10325 addq(count, 128 >> shift);
10326 jcc(Assembler::zero, L_exit);
10327 jmp(L_fill_start);
10328 }
10329
10330 if (MaxVectorSize == 64) {
10331 // Sequence using 64 byte ZMM register.
10332 Label L_fill_128_bytes_zmm;
10333 Label L_fill_192_bytes_zmm;
10334 Label L_fill_192_bytes_loop_zmm;
10335 Label L_fill_192_bytes_loop_header_zmm;
10336 Label L_fill_192_bytes_loop_pre_header_zmm;
10337 Label L_fill_start_zmm_sequence;
10338
10339 bind(L_fill_zmm_sequence);
10340 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
10341
10342 bind(L_fill_start_zmm_sequence);
10343 cmpq(count, 64 >> shift);
10344 jccb(Assembler::greater, L_fill_128_bytes_zmm);
10345 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
10346 jmp(L_exit);
10347
10348 bind(L_fill_128_bytes_zmm);
10349 cmpq(count, 128 >> shift);
10350 jccb(Assembler::greater, L_fill_192_bytes_zmm);
10351 fill64(to, 0, xtmp, true);
10352 subq(count, 64 >> shift);
10353 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
10354 jmp(L_exit);
10355
10356 bind(L_fill_192_bytes_zmm);
10357 cmpq(count, 192 >> shift);
10358 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
10359 fill64(to, 0, xtmp, true);
10360 fill64(to, 64, xtmp, true);
10361 subq(count, 128 >> shift);
10362 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
10363 jmp(L_exit);
10364
10365 bind(L_fill_192_bytes_loop_pre_header_zmm);
10366 {
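      // Same alignment step as in the 32-byte path above, but aligning `to` to a
      // 64-byte boundary ahead of the ZMM stores.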
10367 movq(rtmp, to);
10368 andq(rtmp, 63);
10369 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
10370 negq(rtmp);
10371 addq(rtmp, 64);
10372 mov64(r8, -1L);
10373 bzhiq(r8, r8, rtmp);
10374 kmovql(k2, r8);
10375 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
10376 addq(to, rtmp);
10377 shrq(rtmp, shift);
10378 subq(count, rtmp);
10379 }
10380
10381 cmpq(count, 192 >> shift);
10382 jcc(Assembler::less, L_fill_start_zmm_sequence);
10383
10384 bind(L_fill_192_bytes_loop_header_zmm);
10385 subq(count, 192 >> shift);
10386
10387 align32();
10388 bind(L_fill_192_bytes_loop_zmm);
10389 fill64(to, 0, xtmp, true);
10390 fill64(to, 64, xtmp, true);
10391 fill64(to, 128, xtmp, true);
10392 addq(to, 192);
10393 subq(count, 192 >> shift);
10394 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
10395
10396 addq(count, 192 >> shift);
10397 jcc(Assembler::zero, L_exit);
10398 jmp(L_fill_start_zmm_sequence);
10399 }
10400 bind(L_exit);
10401 }
10402 #endif //COMPILER2_OR_JVMCI
10403
10404
10405 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
10406 Label done;
10407 cvttss2sil(dst, src);
10408 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
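  // cvttss2sil produces the "integer indefinite" value 0x80000000 for NaN and
  // out-of-range inputs, so only that result needs the slow fixup path.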
10409 cmpl(dst, 0x80000000); // float_sign_flip
10410 jccb(Assembler::notEqual, done);
10411 subptr(rsp, 8);
10412 movflt(Address(rsp, 0), src);
10413 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
10414 pop(dst);
10415 bind(done);
10416 }
10417
10418 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
10419 Label done;
10420 cvttsd2sil(dst, src);
10421 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10422 cmpl(dst, 0x80000000); // float_sign_flip
10423 jccb(Assembler::notEqual, done);
10424 subptr(rsp, 8);
10425 movdbl(Address(rsp, 0), src);
10426 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
10427 pop(dst);
10428 bind(done);
10429 }
10430
10431 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
10432 Label done;
10433 cvttss2siq(dst, src);
10434 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10435 jccb(Assembler::notEqual, done);
10436 subptr(rsp, 8);
10437 movflt(Address(rsp, 0), src);
10438 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
10439 pop(dst);
10440 bind(done);
10441 }
10442
10443 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10444   // The following code is a line-by-line assembly translation of the rounding algorithm.
10445   // Please refer to java.lang.Math.round(float) for details.
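  // Roughly, in Java-like pseudocode (a sketch of what is translated below, not the
  // exact library source):
  //   int bits  = floatToRawIntBits(value);
  //   int shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS)
  //               - ((bits & EXP_BIT_MASK) >> (SIGNIFICAND_WIDTH - 1));
  //   if ((shift & MINUS_32) == 0) {                              // exponent small enough to round via shifts
  //     int r = (bits & SIGNIF_BIT_MASK) | (SIGNIF_BIT_MASK + 1); // restore the implicit leading 1
  //     if (bits < 0) r = -r;
  //     return ((r >> shift) + 1) >> 1;                           // drop the fraction, round half up
  //   }
  //   return (int) value;                                         // special case: plain f2i conversion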
10446 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
10447 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
10448 const int32_t FloatConsts_EXP_BIAS = 127;
10449 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
10450 const int32_t MINUS_32 = 0xFFFFFFE0;
10451 Label L_special_case, L_block1, L_exit;
10452 movl(rtmp, FloatConsts_EXP_BIT_MASK);
10453 movdl(dst, src);
10454 andl(dst, rtmp);
10455 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
10456 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
10457 subl(rtmp, dst);
10458 movl(rcx, rtmp);
10459 movl(dst, MINUS_32);
10460 testl(rtmp, dst);
10461 jccb(Assembler::notEqual, L_special_case);
10462 movdl(dst, src);
10463 andl(dst, FloatConsts_SIGNIF_BIT_MASK);
10464 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
10465 movdl(rtmp, src);
10466 testl(rtmp, rtmp);
10467 jccb(Assembler::greaterEqual, L_block1);
10468 negl(dst);
10469 bind(L_block1);
10470 sarl(dst);
10471 addl(dst, 0x1);
10472 sarl(dst, 0x1);
10473 jmp(L_exit);
10474 bind(L_special_case);
10475 convert_f2i(dst, src);
10476 bind(L_exit);
10477 }
10478
10479 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10480   // The following code is a line-by-line assembly translation of the rounding algorithm.
10481   // Please refer to java.lang.Math.round(double) for details.
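  // Mirrors round_float above, using the 64-bit double constants and convert_d2l for
  // the special case.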
10482 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
10483 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
10484 const int64_t DoubleConsts_EXP_BIAS = 1023;
10485 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
10486 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
10487 Label L_special_case, L_block1, L_exit;
10488 mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
10489 movq(dst, src);
10490 andq(dst, rtmp);
10491 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
10492 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
10493 subq(rtmp, dst);
10494 movq(rcx, rtmp);
10495 mov64(dst, MINUS_64);
10496 testq(rtmp, dst);
10497 jccb(Assembler::notEqual, L_special_case);
10498 movq(dst, src);
10499 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
10500 andq(dst, rtmp);
10501 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
10502 orq(dst, rtmp);
10503 movq(rtmp, src);
10504 testq(rtmp, rtmp);
10505 jccb(Assembler::greaterEqual, L_block1);
10506 negq(dst);
10507 bind(L_block1);
10508 sarq(dst);
10509 addq(dst, 0x1);
10510 sarq(dst, 0x1);
10511 jmp(L_exit);
10512 bind(L_special_case);
10513 convert_d2l(dst, src);
10514 bind(L_exit);
10515 }
10516
10517 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
10518 Label done;
10519 cvttsd2siq(dst, src);
10520 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10521 jccb(Assembler::notEqual, done);
10522 subptr(rsp, 8);
10523 movdbl(Address(rsp, 0), src);
10524 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
10525 pop(dst);
10526 bind(done);
10527 }
10528
10529 void MacroAssembler::cache_wb(Address line)
10530 {
10531   // 64-bit CPUs always support clflush
10532 assert(VM_Version::supports_clflush(), "clflush should be available");
10533 bool optimized = VM_Version::supports_clflushopt();
10534 bool no_evict = VM_Version::supports_clwb();
10535
10536   // Prefer clwb (writeback without evict); otherwise prefer clflushopt
10537   // (potentially parallel writeback with evict); otherwise fall back on
10538   // clflush (serial writeback with evict).
10539
10540 if (optimized) {
10541 if (no_evict) {
10542 clwb(line);
10543 } else {
10544 clflushopt(line);
10545 }
10546 } else {
10547 // no need for fence when using CLFLUSH
10548 clflush(line);
10549 }
10550 }
10551
10552 void MacroAssembler::cache_wbsync(bool is_pre)
10553 {
10554 assert(VM_Version::supports_clflush(), "clflush should be available");
10555 bool optimized = VM_Version::supports_clflushopt();
10556 bool no_evict = VM_Version::supports_clwb();
10557
10558 // pick the correct implementation
10559
10560 if (!is_pre && (optimized || no_evict)) {
10561 // need an sfence for post flush when using clflushopt or clwb
10562     // otherwise no need for any synchronization
10563
10564 sfence();
10565 }
10566 }
10567
10568 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
10569 switch (cond) {
10570 // Note some conditions are synonyms for others
10571 case Assembler::zero: return Assembler::notZero;
10572 case Assembler::notZero: return Assembler::zero;
10573 case Assembler::less: return Assembler::greaterEqual;
10574 case Assembler::lessEqual: return Assembler::greater;
10575 case Assembler::greater: return Assembler::lessEqual;
10576 case Assembler::greaterEqual: return Assembler::less;
10577 case Assembler::below: return Assembler::aboveEqual;
10578 case Assembler::belowEqual: return Assembler::above;
10579 case Assembler::above: return Assembler::belowEqual;
10580 case Assembler::aboveEqual: return Assembler::below;
10581 case Assembler::overflow: return Assembler::noOverflow;
10582 case Assembler::noOverflow: return Assembler::overflow;
10583 case Assembler::negative: return Assembler::positive;
10584 case Assembler::positive: return Assembler::negative;
10585 case Assembler::parity: return Assembler::noParity;
10586 case Assembler::noParity: return Assembler::parity;
10587 }
10588 ShouldNotReachHere(); return Assembler::overflow;
10589 }
10590
10591 // This is simply a call to Thread::current()
10592 void MacroAssembler::get_thread_slow(Register thread) {
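  // Thread::current() is an ordinary C call, so every caller-saved GPR may be clobbered.
  // Preserve them here so that, from the caller's point of view, only `thread` (and the
  // condition flags) changes.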
10593 if (thread != rax) {
10594 push(rax);
10595 }
10596 push(rdi);
10597 push(rsi);
10598 push(rdx);
10599 push(rcx);
10600 push(r8);
10601 push(r9);
10602 push(r10);
10603 push(r11);
10604
10605 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
10606
10607 pop(r11);
10608 pop(r10);
10609 pop(r9);
10610 pop(r8);
10611 pop(rcx);
10612 pop(rdx);
10613 pop(rsi);
10614 pop(rdi);
10615 if (thread != rax) {
10616 mov(thread, rax);
10617 pop(rax);
10618 }
10619 }
10620
10621 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
10622 Label L_stack_ok;
10623 if (bias == 0) {
10624 testptr(sp, 2 * wordSize - 1);
10625 } else {
10626     // lea(tmp, Address(rsp, bias));
10627 mov(tmp, sp);
10628 addptr(tmp, bias);
10629 testptr(tmp, 2 * wordSize - 1);
10630 }
10631 jcc(Assembler::equal, L_stack_ok);
10632 block_comment(msg);
10633 stop(msg);
10634 bind(L_stack_ok);
10635 }
10636
10637 // Implements fast-locking. The locking thread is always the current thread (r15_thread).
10638 //
10639 // basic_lock: points to the BasicObjectLock for this lock
10640 // obj:        the object to be locked
10641 // reg_rax:    rax; holds the object's markWord and is clobbered by the CAS
10642 // tmp:        a temporary register
10643 void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
10644 Register thread = r15_thread;
10645
10646 assert(reg_rax == rax, "");
10647 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
10648
10649 Label push;
10650 const Register top = tmp;
10651
10652 // Preload the markWord. It is important that this is the first
10653 // instruction emitted as it is part of C1's null check semantics.
10654 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10655
10656 if (UseObjectMonitorTable) {
10657 // Clear cache in case fast locking succeeds or we need to take the slow-path.
10658 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
10659 }
10660
10661 if (DiagnoseSyncOnValueBasedClasses != 0) {
10662 load_klass(tmp, obj, rscratch1);
10663 testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
10664 jcc(Assembler::notZero, slow);
10665 }
10666
10667 // Load top.
10668 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10669
10670 // Check if the lock-stack is full.
10671 cmpl(top, LockStack::end_offset());
10672 jcc(Assembler::greaterEqual, slow);
10673
10674 // Check for recursion.
10675 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10676 jcc(Assembler::equal, push);
10677
10678 // Check header for monitor (0b10).
10679 testptr(reg_rax, markWord::monitor_value);
10680 jcc(Assembler::notZero, slow);
10681
10682 // Try to lock. Transition lock bits 0b01 => 0b00
10683 movptr(tmp, reg_rax);
10684 andptr(tmp, ~(int32_t)markWord::unlocked_value);
10685 orptr(reg_rax, markWord::unlocked_value);
10686 // Mask inline_type bit such that we go to the slow path if object is an inline type
10687 andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10688
10689 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10690 jcc(Assembler::notEqual, slow);
10691
10692 // Restore top, CAS clobbers register.
10693 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10694
10695 bind(push);
10696 // After successful lock, push object on lock-stack.
10697 movptr(Address(thread, top), obj);
10698 incrementl(top, oopSize);
10699 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10700 }
10701
10702 // Implements fast-unlocking. The unlocking thread is always the current thread (r15_thread).
10703 //
10704 // obj:     the object to be unlocked
10705 // reg_rax: rax; used to hold the object's markWord
10706 // tmp:     a temporary register
10708 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
10709 Register thread = r15_thread;
10710
10711 assert(reg_rax == rax, "");
10712 assert_different_registers(obj, reg_rax, thread, tmp);
10713
10714 Label unlocked, push_and_slow;
10715 const Register top = tmp;
10716
10717 // Check if obj is top of lock-stack.
10718 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10719 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10720 jcc(Assembler::notEqual, slow);
10721
10722 // Pop lock-stack.
10723 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10724 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10725
10726 // Check if recursive.
10727 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10728 jcc(Assembler::equal, unlocked);
10729
10730 // Not recursive. Check header for monitor (0b10).
10731 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10732 testptr(reg_rax, markWord::monitor_value);
10733 jcc(Assembler::notZero, push_and_slow);
10734
10735 #ifdef ASSERT
10736 // Check header not unlocked (0b01).
10737 Label not_unlocked;
10738 testptr(reg_rax, markWord::unlocked_value);
10739 jcc(Assembler::zero, not_unlocked);
10740 stop("fast_unlock already unlocked");
10741 bind(not_unlocked);
10742 #endif
10743
10744 // Try to unlock. Transition lock bits 0b00 => 0b01
10745 movptr(tmp, reg_rax);
10746 orptr(tmp, markWord::unlocked_value);
10747 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10748 jcc(Assembler::equal, unlocked);
10749
10750 bind(push_and_slow);
10751 // Restore lock-stack and handle the unlock in runtime.
10752 #ifdef ASSERT
10753 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10754 movptr(Address(thread, top), obj);
10755 #endif
10756 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10757 jmp(slow);
10758
10759 bind(unlocked);
10760 }
10761
10762 // Saves the legacy GPR state on the stack.
10763 void MacroAssembler::save_legacy_gprs() {
10764 subq(rsp, 16 * wordSize);
10765 movq(Address(rsp, 15 * wordSize), rax);
10766 movq(Address(rsp, 14 * wordSize), rcx);
10767 movq(Address(rsp, 13 * wordSize), rdx);
10768 movq(Address(rsp, 12 * wordSize), rbx);
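  // Slot 11 is intentionally unused: it corresponds to rsp in the register encoding
  // order, and rsp is not saved here.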
10769 movq(Address(rsp, 10 * wordSize), rbp);
10770 movq(Address(rsp, 9 * wordSize), rsi);
10771 movq(Address(rsp, 8 * wordSize), rdi);
10772 movq(Address(rsp, 7 * wordSize), r8);
10773 movq(Address(rsp, 6 * wordSize), r9);
10774 movq(Address(rsp, 5 * wordSize), r10);
10775 movq(Address(rsp, 4 * wordSize), r11);
10776 movq(Address(rsp, 3 * wordSize), r12);
10777 movq(Address(rsp, 2 * wordSize), r13);
10778 movq(Address(rsp, wordSize), r14);
10779 movq(Address(rsp, 0), r15);
10780 }
10781
10782 // Restores the legacy GPR state from the stack.
10783 void MacroAssembler::restore_legacy_gprs() {
10784 movq(r15, Address(rsp, 0));
10785 movq(r14, Address(rsp, wordSize));
10786 movq(r13, Address(rsp, 2 * wordSize));
10787 movq(r12, Address(rsp, 3 * wordSize));
10788 movq(r11, Address(rsp, 4 * wordSize));
10789 movq(r10, Address(rsp, 5 * wordSize));
10790 movq(r9, Address(rsp, 6 * wordSize));
10791 movq(r8, Address(rsp, 7 * wordSize));
10792 movq(rdi, Address(rsp, 8 * wordSize));
10793 movq(rsi, Address(rsp, 9 * wordSize));
10794 movq(rbp, Address(rsp, 10 * wordSize));
10795 movq(rbx, Address(rsp, 12 * wordSize));
10796 movq(rdx, Address(rsp, 13 * wordSize));
10797 movq(rcx, Address(rsp, 14 * wordSize));
10798 movq(rax, Address(rsp, 15 * wordSize));
10799 addq(rsp, 16 * wordSize);
10800 }
10801
10802 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
10803 if (VM_Version::supports_apx_f()) {
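    // The APX zero-upper form of setcc writes the whole destination register, so the
    // explicit movzbl zero-extension used on the legacy path below is not needed.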
10804 esetzucc(comparison, dst);
10805 } else {
10806 setb(comparison, dst);
10807 movzbl(dst, dst);
10808 }
10809 }