1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/aotCodeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "compiler/compiler_globals.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "ci/ciInlineKlass.hpp"
32 #include "crc32c.h"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/collectedHeap.inline.hpp"
36 #include "gc/shared/tlab_globals.hpp"
37 #include "interpreter/bytecodeHistogram.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "interpreter/interpreterRuntime.hpp"
40 #include "jvm.h"
41 #include "memory/resourceArea.hpp"
42 #include "memory/universe.hpp"
43 #include "oops/accessDecorators.hpp"
44 #include "oops/compressedKlass.inline.hpp"
45 #include "oops/compressedOops.inline.hpp"
46 #include "oops/klass.inline.hpp"
47 #include "oops/resolvedFieldEntry.hpp"
48 #include "prims/methodHandles.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/continuation.hpp"
51 #include "runtime/interfaceSupport.inline.hpp"
52 #include "runtime/javaThread.hpp"
53 #include "runtime/jniHandles.hpp"
54 #include "runtime/objectMonitor.hpp"
55 #include "runtime/os.hpp"
56 #include "runtime/safepoint.hpp"
57 #include "runtime/safepointMechanism.hpp"
58 #include "runtime/sharedRuntime.hpp"
59 #include "runtime/signature_cc.hpp"
60 #include "runtime/stubRoutines.hpp"
61 #include "utilities/checkedCast.hpp"
62 #include "utilities/macros.hpp"
63 #include "vmreg_x86.inline.hpp"
64 #ifdef COMPILER2
65 #include "opto/output.hpp"
66 #endif
67
68 #ifdef PRODUCT
69 #define BLOCK_COMMENT(str) /* nothing */
70 #define STOP(error) stop(error)
71 #else
72 #define BLOCK_COMMENT(str) block_comment(str)
73 #define STOP(error) block_comment(error); stop(error)
74 #endif
75
76 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
77
78 #ifdef ASSERT
79 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
80 #endif
81
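// Maps each condition code to its logical negation, e.g.
// reverse[Assembler::zero] == Assembler::notZero. jump_cc() below uses this
// table to invert the sense of a branch when the target may be out of reach.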
82 static const Assembler::Condition reverse[] = {
83 Assembler::noOverflow /* overflow = 0x0 */ ,
84 Assembler::overflow /* noOverflow = 0x1 */ ,
85 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
86 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
87 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
88 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
89 Assembler::above /* belowEqual = 0x6 */ ,
90 Assembler::belowEqual /* above = 0x7 */ ,
91 Assembler::positive /* negative = 0x8 */ ,
92 Assembler::negative /* positive = 0x9 */ ,
93 Assembler::noParity /* parity = 0xa */ ,
94 Assembler::parity /* noParity = 0xb */ ,
95 Assembler::greaterEqual /* less = 0xc */ ,
96 Assembler::less /* greaterEqual = 0xd */ ,
97 Assembler::greater /* lessEqual = 0xe */ ,
98 Assembler::lessEqual /* greater = 0xf, */
99
100 };
101
102
103 // Implementation of MacroAssembler
104
105 Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-relative address;
  // whether we emit an absolute address or a displacement depends on the
  // instruction type: jmp/call use displacements, everything else is absolute
109 assert(!adr.is_lval(), "must be rval");
110 assert(reachable(adr), "must be");
111 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
112
113 }
114
115 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
116 AddressLiteral base = adr.base();
117 lea(rscratch, base);
118 Address index = adr.index();
119 assert(index._disp == 0, "must not have disp"); // maybe it can?
120 Address array(rscratch, index._index, index._scale, index._disp);
121 return array;
122 }
123
124 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
125 Label L, E;
126
127 #ifdef _WIN64
  // Windows always allocates space for its register args
129 assert(num_args <= 4, "only register arguments supported");
130 subq(rsp, frame::arg_reg_save_area_bytes);
131 #endif
132
133 // Align stack if necessary
134 testl(rsp, 15);
135 jcc(Assembler::zero, L);
136
137 subq(rsp, 8);
138 call(RuntimeAddress(entry_point));
139 addq(rsp, 8);
140 jmp(E);
141
142 bind(L);
143 call(RuntimeAddress(entry_point));
144
145 bind(E);
146
147 #ifdef _WIN64
148 // restore stack pointer
149 addq(rsp, frame::arg_reg_save_area_bytes);
150 #endif
151 }
152
153 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
154 assert(!src2.is_lval(), "should use cmpptr");
155 assert(rscratch != noreg || always_reachable(src2), "missing");
156
157 if (reachable(src2)) {
158 cmpq(src1, as_Address(src2));
159 } else {
160 lea(rscratch, src2);
161 Assembler::cmpq(src1, Address(rscratch, 0));
162 }
163 }
164
165 int MacroAssembler::corrected_idivq(Register reg) {
166 // Full implementation of Java ldiv and lrem; checks for special
167 // case as described in JVM spec., p.243 & p.271. The function
168 // returns the (pc) offset of the idivl instruction - may be needed
169 // for implicit exceptions.
170 //
171 // normal case special case
172 //
173 // input : rax: dividend min_long
174 // reg: divisor (may not be eax/edx) -1
175 //
176 // output: rax: quotient (= rax idiv reg) min_long
177 // rdx: remainder (= rax irem reg) 0
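  //
  // Worked example: Java's Long.MIN_VALUE / -1 must yield Long.MIN_VALUE with
  // remainder 0, but idivq would raise #DE on that input, so it is filtered
  // out by the special-case path below.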
178 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
179 static const int64_t min_long = 0x8000000000000000;
180 Label normal_case, special_case;
181
182 // check for special case
183 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
184 jcc(Assembler::notEqual, normal_case);
185 xorl(rdx, rdx); // prepare rdx for possible special case (where
186 // remainder = 0)
187 cmpq(reg, -1);
188 jcc(Assembler::equal, special_case);
189
190 // handle normal case
191 bind(normal_case);
192 cdqq();
193 int idivq_offset = offset();
194 idivq(reg);
195
196 // normal and special case exit
197 bind(special_case);
198
199 return idivq_offset;
200 }
201
202 void MacroAssembler::decrementq(Register reg, int value) {
203 if (value == min_jint) { subq(reg, value); return; }
204 if (value < 0) { incrementq(reg, -value); return; }
205 if (value == 0) { ; return; }
206 if (value == 1 && UseIncDec) { decq(reg) ; return; }
207 /* else */ { subq(reg, value) ; return; }
208 }
209
210 void MacroAssembler::decrementq(Address dst, int value) {
211 if (value == min_jint) { subq(dst, value); return; }
212 if (value < 0) { incrementq(dst, -value); return; }
213 if (value == 0) { ; return; }
214 if (value == 1 && UseIncDec) { decq(dst) ; return; }
215 /* else */ { subq(dst, value) ; return; }
216 }
217
218 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
219 assert(rscratch != noreg || always_reachable(dst), "missing");
220
221 if (reachable(dst)) {
222 incrementq(as_Address(dst));
223 } else {
224 lea(rscratch, dst);
225 incrementq(Address(rscratch, 0));
226 }
227 }
228
229 void MacroAssembler::incrementq(Register reg, int value) {
230 if (value == min_jint) { addq(reg, value); return; }
231 if (value < 0) { decrementq(reg, -value); return; }
232 if (value == 0) { ; return; }
233 if (value == 1 && UseIncDec) { incq(reg) ; return; }
234 /* else */ { addq(reg, value) ; return; }
235 }
236
237 void MacroAssembler::incrementq(Address dst, int value) {
238 if (value == min_jint) { addq(dst, value); return; }
239 if (value < 0) { decrementq(dst, -value); return; }
240 if (value == 0) { ; return; }
241 if (value == 1 && UseIncDec) { incq(dst) ; return; }
242 /* else */ { addq(dst, value) ; return; }
243 }
244
245 // 32bit can do a case table jump in one instruction but we no longer allow the base
246 // to be installed in the Address class
247 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
248 lea(rscratch, entry.base());
249 Address dispatch = entry.index();
250 assert(dispatch._base == noreg, "must be");
251 dispatch._base = rscratch;
252 jmp(dispatch);
253 }
254
255 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
256 ShouldNotReachHere(); // 64bit doesn't use two regs
257 cmpq(x_lo, y_lo);
258 }
259
260 void MacroAssembler::lea(Register dst, AddressLiteral src) {
261 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
262 }
263
264 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
265 lea(rscratch, adr);
266 movptr(dst, rscratch);
267 }
268
269 void MacroAssembler::leave() {
270 // %%% is this really better? Why not on 32bit too?
271 emit_int8((unsigned char)0xC9); // LEAVE
272 }
273
274 void MacroAssembler::lneg(Register hi, Register lo) {
275 ShouldNotReachHere(); // 64bit doesn't use two regs
276 negq(lo);
277 }
278
279 void MacroAssembler::movoop(Register dst, jobject obj) {
280 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
281 }
282
283 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
284 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
285 movq(dst, rscratch);
286 }
287
288 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
289 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
290 }
291
292 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
293 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
294 movq(dst, rscratch);
295 }
296
297 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
298 if (src.is_lval()) {
299 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
300 } else {
301 if (reachable(src)) {
302 movq(dst, as_Address(src));
303 } else {
304 lea(dst, src);
305 movq(dst, Address(dst, 0));
306 }
307 }
308 }
309
310 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
311 movq(as_Address(dst, rscratch), src);
312 }
313
314 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
315 movq(dst, as_Address(src, dst /*rscratch*/));
316 }
317
318 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
319 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
320 if (is_simm32(src)) {
321 movptr(dst, checked_cast<int32_t>(src));
322 } else {
323 mov64(rscratch, src);
324 movq(dst, rscratch);
325 }
326 }
327
328 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
329 movoop(rscratch, obj);
330 push(rscratch);
331 }
332
333 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
334 mov_metadata(rscratch, obj);
335 push(rscratch);
336 }
337
338 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
339 lea(rscratch, src);
340 if (src.is_lval()) {
341 push(rscratch);
342 } else {
343 pushq(Address(rscratch, 0));
344 }
345 }
346
347 static void pass_arg0(MacroAssembler* masm, Register arg) {
348 if (c_rarg0 != arg ) {
349 masm->mov(c_rarg0, arg);
350 }
351 }
352
353 static void pass_arg1(MacroAssembler* masm, Register arg) {
354 if (c_rarg1 != arg ) {
355 masm->mov(c_rarg1, arg);
356 }
357 }
358
359 static void pass_arg2(MacroAssembler* masm, Register arg) {
360 if (c_rarg2 != arg ) {
361 masm->mov(c_rarg2, arg);
362 }
363 }
364
365 static void pass_arg3(MacroAssembler* masm, Register arg) {
366 if (c_rarg3 != arg ) {
367 masm->mov(c_rarg3, arg);
368 }
369 }
370
371 void MacroAssembler::stop(const char* msg) {
372 if (ShowMessageBoxOnError) {
373 address rip = pc();
374 pusha(); // get regs on stack
375 lea(c_rarg1, InternalAddress(rip));
376 movq(c_rarg2, rsp); // pass pointer to regs array
377 }
  // Skip AOT-caching C strings when emitting into the scratch buffer.
379 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
380 lea(c_rarg0, ExternalAddress((address) str));
381 andq(rsp, -16); // align stack as required by ABI
382 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
383 hlt();
384 }
385
386 void MacroAssembler::warn(const char* msg) {
387 push(rbp);
388 movq(rbp, rsp);
389 andq(rsp, -16); // align stack as required by push_CPU_state and call
390 push_CPU_state(); // keeps alignment at 16 bytes
391
392 #ifdef _WIN64
393 // Windows always allocates space for its register args
394 subq(rsp, frame::arg_reg_save_area_bytes);
395 #endif
396 lea(c_rarg0, ExternalAddress((address) msg));
397 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
398
399 #ifdef _WIN64
400 // restore stack pointer
401 addq(rsp, frame::arg_reg_save_area_bytes);
402 #endif
403 pop_CPU_state();
404 mov(rsp, rbp);
405 pop(rbp);
406 }
407
408 void MacroAssembler::print_state() {
409 address rip = pc();
410 pusha(); // get regs on stack
411 push(rbp);
412 movq(rbp, rsp);
413 andq(rsp, -16); // align stack as required by push_CPU_state and call
414 push_CPU_state(); // keeps alignment at 16 bytes
415
416 lea(c_rarg0, InternalAddress(rip));
417 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
418 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
419
420 pop_CPU_state();
421 mov(rsp, rbp);
422 pop(rbp);
423 popa();
424 }
425
426 #ifndef PRODUCT
427 extern "C" void findpc(intptr_t x);
428 #endif
429
430 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
432 if (ShowMessageBoxOnError) {
433 JavaThread* thread = JavaThread::current();
434 JavaThreadState saved_state = thread->thread_state();
435 thread->set_thread_state(_thread_in_vm);
436 #ifndef PRODUCT
437 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
438 ttyLocker ttyl;
439 BytecodeCounter::print();
440 }
441 #endif
442 // To see where a verify_oop failed, get $ebx+40/X for this frame.
443 // XXX correct this offset for amd64
444 // This is the value of eip which points to where verify_oop will return.
445 if (os::message_box(msg, "Execution stopped, print registers?")) {
446 print_state64(pc, regs);
447 BREAKPOINT;
448 }
449 }
450 fatal("DEBUG MESSAGE: %s", msg);
451 }
452
453 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
454 ttyLocker ttyl;
455 DebuggingContext debugging{};
456 tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
457 #ifndef PRODUCT
458 tty->cr();
459 findpc(pc);
460 tty->cr();
461 #endif
462 #define PRINT_REG(rax, value) \
463 { tty->print("%s = ", #rax); os::print_location(tty, value); }
464 PRINT_REG(rax, regs[15]);
465 PRINT_REG(rbx, regs[12]);
466 PRINT_REG(rcx, regs[14]);
467 PRINT_REG(rdx, regs[13]);
468 PRINT_REG(rdi, regs[8]);
469 PRINT_REG(rsi, regs[9]);
470 PRINT_REG(rbp, regs[10]);
471 // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
473 PRINT_REG(r8 , regs[7]);
474 PRINT_REG(r9 , regs[6]);
475 PRINT_REG(r10, regs[5]);
476 PRINT_REG(r11, regs[4]);
477 PRINT_REG(r12, regs[3]);
478 PRINT_REG(r13, regs[2]);
479 PRINT_REG(r14, regs[1]);
480 PRINT_REG(r15, regs[0]);
481 #undef PRINT_REG
482 // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
484 int64_t* dump_sp = rsp;
485 for (int col1 = 0; col1 < 8; col1++) {
486 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
487 os::print_location(tty, *dump_sp++);
488 }
489 for (int row = 0; row < 25; row++) {
490 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
491 for (int col = 0; col < 4; col++) {
492 tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
493 }
494 tty->cr();
495 }
496 // Print some instructions around pc:
497 Disassembler::decode((address)pc-64, (address)pc);
498 tty->print_cr("--------");
499 Disassembler::decode((address)pc, (address)pc+32);
500 }
501
502 // The java_calling_convention describes stack locations as ideal slots on
503 // a frame with no abi restrictions. Since we must observe abi restrictions
504 // (like the placement of the register window) the slots must be biased by
505 // the following value.
506 static int reg2offset_in(VMReg r) {
507 // Account for saved rbp and return address
508 // This should really be in_preserve_stack_slots
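  // e.g. (assuming VMRegImpl::stack_slot_size == 4): stack slot 0 maps to
  // rbp + 16, just past the saved rbp and the return address (8 bytes each).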
509 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
510 }
511
512 static int reg2offset_out(VMReg r) {
513 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
514 }
515
516 // A long move
517 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
518
  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.
521
522 if (src.is_single_phys_reg() ) {
523 if (dst.is_single_phys_reg()) {
524 if (dst.first() != src.first()) {
525 mov(dst.first()->as_Register(), src.first()->as_Register());
526 }
527 } else {
528 assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
529 src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
530 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
531 }
532 } else if (dst.is_single_phys_reg()) {
533 assert(src.is_single_reg(), "not a stack pair");
534 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
535 } else {
536 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
537 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
538 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
539 }
540 }
541
542 // A double move
543 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
544
  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.
547
548 if (src.is_single_phys_reg() ) {
549 if (dst.is_single_phys_reg()) {
550 // In theory these overlap but the ordering is such that this is likely a nop
551 if ( src.first() != dst.first()) {
552 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
553 }
554 } else {
555 assert(dst.is_single_reg(), "not a stack pair");
556 movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
557 }
558 } else if (dst.is_single_phys_reg()) {
559 assert(src.is_single_reg(), "not a stack pair");
560 movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
561 } else {
562 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
563 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
564 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
565 }
566 }
567
568
569 // A float arg may have to do float reg int reg conversion
570 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
571 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
572
  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.
575
576 if (src.first()->is_stack()) {
577 if (dst.first()->is_stack()) {
578 movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
579 movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
580 } else {
581 // stack to reg
582 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
583 movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
584 }
585 } else if (dst.first()->is_stack()) {
586 // reg to stack
587 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
588 movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
589 } else {
590 // reg to reg
591 // In theory these overlap but the ordering is such that this is likely a nop
592 if ( src.first() != dst.first()) {
593 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
594 }
595 }
596 }
597
// On 64 bit we store integer-like items to the stack as 64-bit items
// (x86_32/64 ABI), even though Java would only store 32 bits for a
// parameter. On 32 bit it would simply be 32 bits, so this routine does
// 32->32 on 32 bit and 32->64 on 64 bit.
602 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
603 if (src.first()->is_stack()) {
604 if (dst.first()->is_stack()) {
605 // stack to stack
606 movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
607 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
608 } else {
609 // stack to reg
610 movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
611 }
612 } else if (dst.first()->is_stack()) {
613 // reg to stack
614 // Do we really have to sign extend???
615 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
616 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
617 } else {
618 // Do we really have to sign extend???
619 // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
620 if (dst.first() != src.first()) {
621 movq(dst.first()->as_Register(), src.first()->as_Register());
622 }
623 }
624 }
625
626 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
627 if (src.first()->is_stack()) {
628 if (dst.first()->is_stack()) {
629 // stack to stack
630 movq(rax, Address(rbp, reg2offset_in(src.first())));
631 movq(Address(rsp, reg2offset_out(dst.first())), rax);
632 } else {
633 // stack to reg
634 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
635 }
636 } else if (dst.first()->is_stack()) {
637 // reg to stack
638 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
639 } else {
640 if (dst.first() != src.first()) {
641 movq(dst.first()->as_Register(), src.first()->as_Register());
642 }
643 }
644 }
645
646 // An oop arg. Must pass a handle not the oop itself
647 void MacroAssembler::object_move(OopMap* map,
648 int oop_handle_offset,
649 int framesize_in_slots,
650 VMRegPair src,
651 VMRegPair dst,
652 bool is_receiver,
653 int* receiver_offset) {
654
655 // must pass a handle. First figure out the location we use as a handle
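  // (JNI convention, as a reminder: the callee receives the address of a slot
  //  holding the oop, or a null pointer when the oop itself is null -- hence
  //  the conditional moves to null below.)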
656
657 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
658
  // See if the oop is null; if it is, we need no handle
660
661 if (src.first()->is_stack()) {
662
663 // Oop is already on the stack as an argument
664 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
665 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
666 if (is_receiver) {
667 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
668 }
669
670 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
671 lea(rHandle, Address(rbp, reg2offset_in(src.first())));
672 // conditionally move a null
673 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
674 } else {
675
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles, and pass a handle if the oop is non-null
678
679 const Register rOop = src.first()->as_Register();
680 int oop_slot;
681 if (rOop == j_rarg0)
682 oop_slot = 0;
683 else if (rOop == j_rarg1)
684 oop_slot = 1;
685 else if (rOop == j_rarg2)
686 oop_slot = 2;
687 else if (rOop == j_rarg3)
688 oop_slot = 3;
689 else if (rOop == j_rarg4)
690 oop_slot = 4;
691 else {
692 assert(rOop == j_rarg5, "wrong register");
693 oop_slot = 5;
694 }
695
696 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
697 int offset = oop_slot*VMRegImpl::stack_slot_size;
698
699 map->set_oop(VMRegImpl::stack2reg(oop_slot));
700 // Store oop in handle area, may be null
701 movptr(Address(rsp, offset), rOop);
702 if (is_receiver) {
703 *receiver_offset = offset;
704 }
705
706 cmpptr(rOop, NULL_WORD);
707 lea(rHandle, Address(rsp, offset));
708 // conditionally move a null from the handle area where it was just stored
709 cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
710 }
711
  // If the arg is destined for the stack, store it there; otherwise it is
  // already in the correct reg.
713 if (dst.first()->is_stack()) {
714 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
715 }
716 }
717
718 void MacroAssembler::addptr(Register dst, int32_t imm32) {
719 addq(dst, imm32);
720 }
721
722 void MacroAssembler::addptr(Register dst, Register src) {
723 addq(dst, src);
724 }
725
726 void MacroAssembler::addptr(Address dst, Register src) {
727 addq(dst, src);
728 }
729
730 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
731 assert(rscratch != noreg || always_reachable(src), "missing");
732
733 if (reachable(src)) {
734 Assembler::addsd(dst, as_Address(src));
735 } else {
736 lea(rscratch, src);
737 Assembler::addsd(dst, Address(rscratch, 0));
738 }
739 }
740
741 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
742 assert(rscratch != noreg || always_reachable(src), "missing");
743
744 if (reachable(src)) {
745 addss(dst, as_Address(src));
746 } else {
747 lea(rscratch, src);
748 addss(dst, Address(rscratch, 0));
749 }
750 }
751
752 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
753 assert(rscratch != noreg || always_reachable(src), "missing");
754
755 if (reachable(src)) {
756 Assembler::addpd(dst, as_Address(src));
757 } else {
758 lea(rscratch, src);
759 Assembler::addpd(dst, Address(rscratch, 0));
760 }
761 }
762
763 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
764 // Stub code is generated once and never copied.
765 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
766 void MacroAssembler::align64() {
767 align(64, (uint)(uintptr_t)pc());
768 }
769
770 void MacroAssembler::align32() {
771 align(32, (uint)(uintptr_t)pc());
772 }
773
774 void MacroAssembler::align(uint modulus) {
775 // 8273459: Ensure alignment is possible with current segment alignment
776 assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
777 align(modulus, offset());
778 }
779
780 void MacroAssembler::align(uint modulus, uint target) {
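  // e.g. (sketch) align(8, 5) emits nop(3) so that the next emitted byte
  // lands on an 8-byte boundary.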
781 if (target % modulus != 0) {
782 nop(modulus - (target % modulus));
783 }
784 }
785
786 void MacroAssembler::push_f(XMMRegister r) {
787 subptr(rsp, wordSize);
788 movflt(Address(rsp, 0), r);
789 }
790
791 void MacroAssembler::pop_f(XMMRegister r) {
792 movflt(r, Address(rsp, 0));
793 addptr(rsp, wordSize);
794 }
795
796 void MacroAssembler::push_d(XMMRegister r) {
797 subptr(rsp, 2 * wordSize);
798 movdbl(Address(rsp, 0), r);
799 }
800
801 void MacroAssembler::pop_d(XMMRegister r) {
802 movdbl(r, Address(rsp, 0));
803 addptr(rsp, 2 * Interpreter::stackElementSize);
804 }
805
806 void MacroAssembler::push_ppx(Register src) {
807 if (VM_Version::supports_apx_f()) {
808 pushp(src);
809 } else {
810 Assembler::push(src);
811 }
812 }
813
814 void MacroAssembler::pop_ppx(Register dst) {
815 if (VM_Version::supports_apx_f()) {
816 popp(dst);
817 } else {
818 Assembler::pop(dst);
819 }
820 }
821
822 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
823 // Used in sign-masking with aligned address.
824 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
825 assert(rscratch != noreg || always_reachable(src), "missing");
826
827 if (UseAVX > 2 &&
828 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
829 (dst->encoding() >= 16)) {
830 vpand(dst, dst, src, AVX_512bit, rscratch);
831 } else if (reachable(src)) {
832 Assembler::andpd(dst, as_Address(src));
833 } else {
834 lea(rscratch, src);
835 Assembler::andpd(dst, Address(rscratch, 0));
836 }
837 }
838
839 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
840 // Used in sign-masking with aligned address.
841 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
842 assert(rscratch != noreg || always_reachable(src), "missing");
843
844 if (reachable(src)) {
845 Assembler::andps(dst, as_Address(src));
846 } else {
847 lea(rscratch, src);
848 Assembler::andps(dst, Address(rscratch, 0));
849 }
850 }
851
852 void MacroAssembler::andptr(Register dst, int32_t imm32) {
853 andq(dst, imm32);
854 }
855
856 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
857 assert(rscratch != noreg || always_reachable(src), "missing");
858
859 if (reachable(src)) {
860 andq(dst, as_Address(src));
861 } else {
862 lea(rscratch, src);
863 andq(dst, Address(rscratch, 0));
864 }
865 }
866
867 void MacroAssembler::atomic_incl(Address counter_addr) {
868 lock();
869 incrementl(counter_addr);
870 }
871
872 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
873 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
874
875 if (reachable(counter_addr)) {
876 atomic_incl(as_Address(counter_addr));
877 } else {
878 lea(rscratch, counter_addr);
879 atomic_incl(Address(rscratch, 0));
880 }
881 }
882
883 void MacroAssembler::atomic_incq(Address counter_addr) {
884 lock();
885 incrementq(counter_addr);
886 }
887
888 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
889 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
890
891 if (reachable(counter_addr)) {
892 atomic_incq(as_Address(counter_addr));
893 } else {
894 lea(rscratch, counter_addr);
895 atomic_incq(Address(rscratch, 0));
896 }
897 }
898
// Writes to successive stack pages, until the given size plus the shadow
// pages has been reached, to check for stack overflow. This clobbers tmp.
901 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
902 movptr(tmp, rsp);
903 // Bang stack for total size given plus shadow page size.
904 // Bang one page at a time because large size can bang beyond yellow and
905 // red zones.
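  // e.g. (sketch, with 4K pages) for size == 3 pages the loop below stores at
  // rsp-4K, rsp-8K and rsp-12K, then falls through to bang the shadow pages.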
906 Label loop;
907 bind(loop);
908 movl(Address(tmp, (-(int)os::vm_page_size())), size );
909 subptr(tmp, (int)os::vm_page_size());
910 subl(size, (int)os::vm_page_size());
911 jcc(Assembler::greater, loop);
912
913 // Bang down shadow pages too.
914 // At this point, (tmp-0) is the last address touched, so don't
915 // touch it again. (It was touched as (tmp-pagesize) but then tmp
916 // was post-decremented.) Skip this address by starting at i=1, and
917 // touch a few more pages below. N.B. It is important to touch all
918 // the way down including all pages in the shadow zone.
919 for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // This could be any sized move, but since it can serve as a debugging
    // crumb, the bigger the better.
922 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
923 }
924 }
925
926 void MacroAssembler::reserved_stack_check() {
927 // testing if reserved zone needs to be enabled
928 Label no_reserved_zone_enabling;
929
930 cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
931 jcc(Assembler::below, no_reserved_zone_enabling);
932
933 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
934 jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
935 should_not_reach_here();
936
937 bind(no_reserved_zone_enabling);
938 }
939
940 void MacroAssembler::c2bool(Register x) {
941 // implements x == 0 ? 0 : 1
942 // note: must only look at least-significant byte of x
943 // since C-style booleans are stored in one byte
944 // only! (was bug)
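  // e.g. x == 0x0100: the low byte is 0, so andl sets ZF and setb writes 0;
  //      x == 0x0002: the low byte is non-zero, so the result is 1.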
945 andl(x, 0xFF);
946 setb(Assembler::notZero, x);
947 }
948
949 // Wouldn't need if AddressLiteral version had new name
950 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
951 Assembler::call(L, rtype);
952 }
953
954 void MacroAssembler::call(Register entry) {
955 Assembler::call(entry);
956 }
957
958 void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
959 assert(rscratch != noreg || always_reachable(entry), "missing");
960
961 if (reachable(entry)) {
962 Assembler::call_literal(entry.target(), entry.rspec());
963 } else {
964 lea(rscratch, entry);
965 Assembler::call(rscratch);
966 }
967 }
968
969 void MacroAssembler::ic_call(address entry, jint method_index) {
970 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
971 // Needs full 64-bit immediate for later patching.
972 mov64(rax, (int64_t)Universe::non_oop_word());
973 call(AddressLiteral(entry, rh));
974 }
975
976 int MacroAssembler::ic_check_size() {
977 return UseCompactObjectHeaders ? 17 : 14;
978 }
979
980 int MacroAssembler::ic_check(int end_alignment) {
981 Register receiver = j_rarg0;
982 Register data = rax;
983 Register temp = rscratch1;
984
985 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
986 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
987 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
988 // before the inline cache check here, and not after
989 align(end_alignment, offset() + ic_check_size());
990
991 int uep_offset = offset();
992
993 if (UseCompactObjectHeaders) {
994 load_narrow_klass_compact(temp, receiver);
995 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
996 } else if (UseCompressedClassPointers) {
997 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
998 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
999 } else {
1000 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1001 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
1002 }
1003
1004 // if inline cache check fails, then jump to runtime routine
1005 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1006 assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
1007
1008 return uep_offset;
1009 }
1010
1011 void MacroAssembler::emit_static_call_stub() {
1012 // Static stub relocation also tags the Method* in the code-stream.
1013 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
1014 // This is recognized as unresolved by relocs/nativeinst/ic code.
1015 jump(RuntimeAddress(pc()));
1016 }
1017
1018 // Implementation of call_VM versions
1019
1020 void MacroAssembler::call_VM(Register oop_result,
1021 address entry_point,
1022 bool check_exceptions) {
1023 Label C, E;
1024 call(C, relocInfo::none);
1025 jmp(E);
1026
1027 bind(C);
1028 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1029 ret(0);
1030
1031 bind(E);
1032 }
1033
1034 void MacroAssembler::call_VM(Register oop_result,
1035 address entry_point,
1036 Register arg_1,
1037 bool check_exceptions) {
1038 Label C, E;
1039 call(C, relocInfo::none);
1040 jmp(E);
1041
1042 bind(C);
1043 pass_arg1(this, arg_1);
1044 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1045 ret(0);
1046
1047 bind(E);
1048 }
1049
1050 void MacroAssembler::call_VM(Register oop_result,
1051 address entry_point,
1052 Register arg_1,
1053 Register arg_2,
1054 bool check_exceptions) {
1055 Label C, E;
1056 call(C, relocInfo::none);
1057 jmp(E);
1058
1059 bind(C);
1060
1061 assert_different_registers(arg_1, c_rarg2);
1062
1063 pass_arg2(this, arg_2);
1064 pass_arg1(this, arg_1);
1065 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1066 ret(0);
1067
1068 bind(E);
1069 }
1070
1071 void MacroAssembler::call_VM(Register oop_result,
1072 address entry_point,
1073 Register arg_1,
1074 Register arg_2,
1075 Register arg_3,
1076 bool check_exceptions) {
1077 Label C, E;
1078 call(C, relocInfo::none);
1079 jmp(E);
1080
1081 bind(C);
1082
1083 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1084 assert_different_registers(arg_2, c_rarg3);
1085 pass_arg3(this, arg_3);
1086 pass_arg2(this, arg_2);
1087 pass_arg1(this, arg_1);
1088 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1089 ret(0);
1090
1091 bind(E);
1092 }
1093
1094 void MacroAssembler::call_VM(Register oop_result,
1095 Register last_java_sp,
1096 address entry_point,
1097 int number_of_arguments,
1098 bool check_exceptions) {
1099 call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1100 }
1101
1102 void MacroAssembler::call_VM(Register oop_result,
1103 Register last_java_sp,
1104 address entry_point,
1105 Register arg_1,
1106 bool check_exceptions) {
1107 pass_arg1(this, arg_1);
1108 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1109 }
1110
1111 void MacroAssembler::call_VM(Register oop_result,
1112 Register last_java_sp,
1113 address entry_point,
1114 Register arg_1,
1115 Register arg_2,
1116 bool check_exceptions) {
1117
1118 assert_different_registers(arg_1, c_rarg2);
1119 pass_arg2(this, arg_2);
1120 pass_arg1(this, arg_1);
1121 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1122 }
1123
1124 void MacroAssembler::call_VM(Register oop_result,
1125 Register last_java_sp,
1126 address entry_point,
1127 Register arg_1,
1128 Register arg_2,
1129 Register arg_3,
1130 bool check_exceptions) {
1131 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1132 assert_different_registers(arg_2, c_rarg3);
1133 pass_arg3(this, arg_3);
1134 pass_arg2(this, arg_2);
1135 pass_arg1(this, arg_1);
1136 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1137 }
1138
1139 void MacroAssembler::super_call_VM(Register oop_result,
1140 Register last_java_sp,
1141 address entry_point,
1142 int number_of_arguments,
1143 bool check_exceptions) {
1144 MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1145 }
1146
1147 void MacroAssembler::super_call_VM(Register oop_result,
1148 Register last_java_sp,
1149 address entry_point,
1150 Register arg_1,
1151 bool check_exceptions) {
1152 pass_arg1(this, arg_1);
1153 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1154 }
1155
1156 void MacroAssembler::super_call_VM(Register oop_result,
1157 Register last_java_sp,
1158 address entry_point,
1159 Register arg_1,
1160 Register arg_2,
1161 bool check_exceptions) {
1162
1163 assert_different_registers(arg_1, c_rarg2);
1164 pass_arg2(this, arg_2);
1165 pass_arg1(this, arg_1);
1166 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1167 }
1168
1169 void MacroAssembler::super_call_VM(Register oop_result,
1170 Register last_java_sp,
1171 address entry_point,
1172 Register arg_1,
1173 Register arg_2,
1174 Register arg_3,
1175 bool check_exceptions) {
1176 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1177 assert_different_registers(arg_2, c_rarg3);
1178 pass_arg3(this, arg_3);
1179 pass_arg2(this, arg_2);
1180 pass_arg1(this, arg_1);
1181 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1182 }
1183
1184 void MacroAssembler::call_VM_base(Register oop_result,
1185 Register last_java_sp,
1186 address entry_point,
1187 int number_of_arguments,
1188 bool check_exceptions) {
1189 Register java_thread = r15_thread;
1190
1191 // determine last_java_sp register
1192 if (!last_java_sp->is_valid()) {
1193 last_java_sp = rsp;
1194 }
1195 // debugging support
1196 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1197 #ifdef ASSERT
1198 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
1199 // r12 is the heapbase.
1200 if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
1201 #endif // ASSERT
1202
1203 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
1204 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
1205
1206 // push java thread (becomes first argument of C function)
1207
1208 mov(c_rarg0, r15_thread);
1209
1210 // set last Java frame before call
1211 assert(last_java_sp != rbp, "can't use ebp/rbp");
1212
1213 // Only interpreter should have to set fp
1214 set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);
1215
1216 // do the call, remove parameters
1217 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
1218
1219 #ifdef ASSERT
1220 // Check that thread register is not clobbered.
1221 guarantee(java_thread != rax, "change this code");
1222 push(rax);
1223 { Label L;
1224 get_thread_slow(rax);
1225 cmpptr(java_thread, rax);
1226 jcc(Assembler::equal, L);
1227 STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
1228 bind(L);
1229 }
1230 pop(rax);
1231 #endif
1232
1233 // reset last Java frame
1234 // Only interpreter should have to clear fp
1235 reset_last_Java_frame(true);
1236
1237 // C++ interp handles this in the interpreter
1238 check_and_handle_popframe();
1239 check_and_handle_earlyret();
1240
1241 if (check_exceptions) {
1242 // check for pending exceptions (java_thread is set upon return)
1243 cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
    // This used to conditionally jump to forward_exception; however, if the
    // code is relocated the branch might not reach, so we jump around a far
    // jump that can always reach.
1247
1248 Label ok;
1249 jcc(Assembler::equal, ok);
1250 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1251 bind(ok);
1252 }
1253
1254 // get oop result if there is one and reset the value in the thread
1255 if (oop_result->is_valid()) {
1256 get_vm_result_oop(oop_result);
1257 }
1258 }
1259
1260 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculating the value for last_Java_sp is somewhat subtle. call_VM does
  // an intermediate call which places a return address on the stack just
  // under the stack pointer as the user left it.
  // This allows us to retrieve last_Java_pc from last_Java_sp[-1].
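  //
  // Sketch of the stack inside the intermediate call:
  //   [rsp + 0]              return address pushed by call_VM (-> last_Java_pc)
  //   rax = rsp + wordSize   caller's SP at the call site     (-> last_Java_sp)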
1265
1266 // We've pushed one address, correct last_Java_sp
1267 lea(rax, Address(rsp, wordSize));
1268
1269 call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
1270 }
1271
1272 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
1273 void MacroAssembler::call_VM_leaf0(address entry_point) {
1274 MacroAssembler::call_VM_leaf_base(entry_point, 0);
1275 }
1276
1277 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1278 call_VM_leaf_base(entry_point, number_of_arguments);
1279 }
1280
1281 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1282 pass_arg0(this, arg_0);
1283 call_VM_leaf(entry_point, 1);
1284 }
1285
1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1287
1288 assert_different_registers(arg_0, c_rarg1);
1289 pass_arg1(this, arg_1);
1290 pass_arg0(this, arg_0);
1291 call_VM_leaf(entry_point, 2);
1292 }
1293
1294 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1295 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1296 assert_different_registers(arg_1, c_rarg2);
1297 pass_arg2(this, arg_2);
1298 pass_arg1(this, arg_1);
1299 pass_arg0(this, arg_0);
1300 call_VM_leaf(entry_point, 3);
1301 }
1302
1303 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1304 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
1305 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1306 assert_different_registers(arg_2, c_rarg3);
1307 pass_arg3(this, arg_3);
1308 pass_arg2(this, arg_2);
1309 pass_arg1(this, arg_1);
1310 pass_arg0(this, arg_0);
1311 call_VM_leaf(entry_point, 3);
1312 }
1313
1314 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1315 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1316 }
1317
1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1319 pass_arg0(this, arg_0);
1320 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1321 }
1322
1323 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1324 assert_different_registers(arg_0, c_rarg1);
1325 pass_arg1(this, arg_1);
1326 pass_arg0(this, arg_0);
1327 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1328 }
1329
1330 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1331 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1332 assert_different_registers(arg_1, c_rarg2);
1333 pass_arg2(this, arg_2);
1334 pass_arg1(this, arg_1);
1335 pass_arg0(this, arg_0);
1336 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1337 }
1338
1339 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1340 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
1341 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1342 assert_different_registers(arg_2, c_rarg3);
1343 pass_arg3(this, arg_3);
1344 pass_arg2(this, arg_2);
1345 pass_arg1(this, arg_1);
1346 pass_arg0(this, arg_0);
1347 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1348 }
1349
1350 void MacroAssembler::get_vm_result_oop(Register oop_result) {
1351 movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
1352 movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
1353 verify_oop_msg(oop_result, "broken oop in call_VM_base");
1354 }
1355
1356 void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
1357 movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
1358 movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
1359 }
1360
1361 void MacroAssembler::check_and_handle_earlyret() {
1362 }
1363
1364 void MacroAssembler::check_and_handle_popframe() {
1365 }
1366
1367 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
1368 assert(rscratch != noreg || always_reachable(src1), "missing");
1369
1370 if (reachable(src1)) {
1371 cmpl(as_Address(src1), imm);
1372 } else {
1373 lea(rscratch, src1);
1374 cmpl(Address(rscratch, 0), imm);
1375 }
1376 }
1377
1378 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
1379 assert(!src2.is_lval(), "use cmpptr");
1380 assert(rscratch != noreg || always_reachable(src2), "missing");
1381
1382 if (reachable(src2)) {
1383 cmpl(src1, as_Address(src2));
1384 } else {
1385 lea(rscratch, src2);
1386 cmpl(src1, Address(rscratch, 0));
1387 }
1388 }
1389
1390 void MacroAssembler::cmp32(Register src1, int32_t imm) {
1391 Assembler::cmpl(src1, imm);
1392 }
1393
1394 void MacroAssembler::cmp32(Register src1, Address src2) {
1395 Assembler::cmpl(src1, src2);
1396 }
1397
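// Produces -1, 0 or +1 in dst, matching Java's dcmpl/dcmpg (and fcmpl/fcmpg
// for the float variant below): with unordered_is_less an unordered (NaN)
// comparison yields -1, otherwise it yields +1.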
1398 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1399 ucomisd(opr1, opr2);
1400
1401 Label L;
1402 if (unordered_is_less) {
1403 movl(dst, -1);
1404 jcc(Assembler::parity, L);
1405 jcc(Assembler::below , L);
1406 movl(dst, 0);
1407 jcc(Assembler::equal , L);
1408 increment(dst);
1409 } else { // unordered is greater
1410 movl(dst, 1);
1411 jcc(Assembler::parity, L);
1412 jcc(Assembler::above , L);
1413 movl(dst, 0);
1414 jcc(Assembler::equal , L);
1415 decrementl(dst);
1416 }
1417 bind(L);
1418 }
1419
1420 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1421 ucomiss(opr1, opr2);
1422
1423 Label L;
1424 if (unordered_is_less) {
1425 movl(dst, -1);
1426 jcc(Assembler::parity, L);
1427 jcc(Assembler::below , L);
1428 movl(dst, 0);
1429 jcc(Assembler::equal , L);
1430 increment(dst);
1431 } else { // unordered is greater
1432 movl(dst, 1);
1433 jcc(Assembler::parity, L);
1434 jcc(Assembler::above , L);
1435 movl(dst, 0);
1436 jcc(Assembler::equal , L);
1437 decrementl(dst);
1438 }
1439 bind(L);
1440 }
1441
1442
1443 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
1444 assert(rscratch != noreg || always_reachable(src1), "missing");
1445
1446 if (reachable(src1)) {
1447 cmpb(as_Address(src1), imm);
1448 } else {
1449 lea(rscratch, src1);
1450 cmpb(Address(rscratch, 0), imm);
1451 }
1452 }
1453
1454 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
1455 assert(rscratch != noreg || always_reachable(src2), "missing");
1456
1457 if (src2.is_lval()) {
1458 movptr(rscratch, src2);
1459 Assembler::cmpq(src1, rscratch);
1460 } else if (reachable(src2)) {
1461 cmpq(src1, as_Address(src2));
1462 } else {
1463 lea(rscratch, src2);
1464 Assembler::cmpq(src1, Address(rscratch, 0));
1465 }
1466 }
1467
1468 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
1469 assert(src2.is_lval(), "not a mem-mem compare");
1470 // moves src2's literal address
1471 movptr(rscratch, src2);
1472 Assembler::cmpq(src1, rscratch);
1473 }
1474
1475 void MacroAssembler::cmpoop(Register src1, Register src2) {
1476 cmpptr(src1, src2);
1477 }
1478
1479 void MacroAssembler::cmpoop(Register src1, Address src2) {
1480 cmpptr(src1, src2);
1481 }
1482
1483 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
1484 movoop(rscratch, src2);
1485 cmpptr(src1, rscratch);
1486 }
1487
1488 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
1489 assert(rscratch != noreg || always_reachable(adr), "missing");
1490
1491 if (reachable(adr)) {
1492 lock();
1493 cmpxchgptr(reg, as_Address(adr));
1494 } else {
1495 lea(rscratch, adr);
1496 lock();
1497 cmpxchgptr(reg, Address(rscratch, 0));
1498 }
1499 }
1500
1501 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
1502 cmpxchgq(reg, adr);
1503 }
1504
1505 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1506 assert(rscratch != noreg || always_reachable(src), "missing");
1507
1508 if (reachable(src)) {
1509 Assembler::comisd(dst, as_Address(src));
1510 } else {
1511 lea(rscratch, src);
1512 Assembler::comisd(dst, Address(rscratch, 0));
1513 }
1514 }
1515
1516 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
1517 assert(rscratch != noreg || always_reachable(src), "missing");
1518
1519 if (reachable(src)) {
1520 Assembler::comiss(dst, as_Address(src));
1521 } else {
1522 lea(rscratch, src);
1523 Assembler::comiss(dst, Address(rscratch, 0));
1524 }
1525 }
1526
1527
1528 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
1529 assert(rscratch != noreg || always_reachable(counter_addr), "missing");
1530
1531 Condition negated_cond = negate_condition(cond);
1532 Label L;
1533 jcc(negated_cond, L);
1534 pushf(); // Preserve flags
1535 atomic_incl(counter_addr, rscratch);
1536 popf();
1537 bind(L);
1538 }
1539
1540 int MacroAssembler::corrected_idivl(Register reg) {
1541 // Full implementation of Java idiv and irem; checks for
1542 // special case as described in JVM spec., p.243 & p.271.
1543 // The function returns the (pc) offset of the idivl
1544 // instruction - may be needed for implicit exceptions.
1545 //
1546 // normal case special case
1547 //
1548 // input : rax,: dividend min_int
1549 // reg: divisor (may not be rax,/rdx) -1
1550 //
1551 // output: rax,: quotient (= rax, idiv reg) min_int
1552 // rdx: remainder (= rax, irem reg) 0
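  //
  // e.g. Integer.MIN_VALUE / -1 would raise #DE in idivl; the special-case
  // path below instead produces quotient min_int and remainder 0.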
1553 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
1554 const int min_int = 0x80000000;
1555 Label normal_case, special_case;
1556
1557 // check for special case
1558 cmpl(rax, min_int);
1559 jcc(Assembler::notEqual, normal_case);
1560 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
1561 cmpl(reg, -1);
1562 jcc(Assembler::equal, special_case);
1563
1564 // handle normal case
1565 bind(normal_case);
1566 cdql();
1567 int idivl_offset = offset();
1568 idivl(reg);
1569
1570 // normal and special case exit
1571 bind(special_case);
1572
1573 return idivl_offset;
1574 }
1575
1576
1577
1578 void MacroAssembler::decrementl(Register reg, int value) {
1579 if (value == min_jint) {subl(reg, value) ; return; }
1580 if (value < 0) { incrementl(reg, -value); return; }
1581 if (value == 0) { ; return; }
1582 if (value == 1 && UseIncDec) { decl(reg) ; return; }
1583 /* else */ { subl(reg, value) ; return; }
1584 }
1585
1586 void MacroAssembler::decrementl(Address dst, int value) {
1587 if (value == min_jint) {subl(dst, value) ; return; }
1588 if (value < 0) { incrementl(dst, -value); return; }
1589 if (value == 0) { ; return; }
1590 if (value == 1 && UseIncDec) { decl(dst) ; return; }
1591 /* else */ { subl(dst, value) ; return; }
1592 }
1593
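// Signed division of reg by 2^shift_value with truncation toward zero:
// negative dividends get (2^shift_value - 1) added before the arithmetic
// shift. e.g. (sketch) -7 / 4: offset = 3, -7 + 3 = -4, -4 >> 2 = -1.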
1594 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
1595 assert(shift_value > 0, "illegal shift value");
1596 Label _is_positive;
1597 testl (reg, reg);
1598 jcc (Assembler::positive, _is_positive);
1599 int offset = (1 << shift_value) - 1 ;
1600
1601 if (offset == 1) {
1602 incrementl(reg);
1603 } else {
1604 addl(reg, offset);
1605 }
1606
1607 bind (_is_positive);
1608 sarl(reg, shift_value);
1609 }
1610
1611 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1612 assert(rscratch != noreg || always_reachable(src), "missing");
1613
1614 if (reachable(src)) {
1615 Assembler::divsd(dst, as_Address(src));
1616 } else {
1617 lea(rscratch, src);
1618 Assembler::divsd(dst, Address(rscratch, 0));
1619 }
1620 }
1621
1622 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
1623 assert(rscratch != noreg || always_reachable(src), "missing");
1624
1625 if (reachable(src)) {
1626 Assembler::divss(dst, as_Address(src));
1627 } else {
1628 lea(rscratch, src);
1629 Assembler::divss(dst, Address(rscratch, 0));
1630 }
1631 }
1632
1633 void MacroAssembler::enter() {
1634 push(rbp);
1635 mov(rbp, rsp);
1636 }
1637
1638 void MacroAssembler::post_call_nop() {
1639 if (!Continuations::enabled()) {
1640 return;
1641 }
1642 InstructionMark im(this);
1643 relocate(post_call_nop_Relocation::spec());
1644 InlineSkippedInstructionsCounter skipCounter(this);
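  // Emit the canonical 8-byte NOP, 0F 1F 84 00 <disp32>,
  // i.e. nopl 0x0(%rax,%rax,1).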
1645 emit_int8((uint8_t)0x0f);
1646 emit_int8((uint8_t)0x1f);
1647 emit_int8((uint8_t)0x84);
1648 emit_int8((uint8_t)0x00);
1649 emit_int32(0x00);
1650 }
1651
1652 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
1653 assert(rscratch != noreg || always_reachable(src), "missing");
1654 if (reachable(src)) {
1655 Assembler::mulpd(dst, as_Address(src));
1656 } else {
1657 lea(rscratch, src);
1658 Assembler::mulpd(dst, Address(rscratch, 0));
1659 }
1660 }
1661
1662 // dst = c = a * b + c
1663 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
1664 Assembler::vfmadd231sd(c, a, b);
1665 if (dst != c) {
1666 movdbl(dst, c);
1667 }
1668 }
1669
1670 // dst = c = a * b + c
1671 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
1672 Assembler::vfmadd231ss(c, a, b);
1673 if (dst != c) {
1674 movflt(dst, c);
1675 }
1676 }
1677
1678 // dst = c = a * b + c
1679 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
1680 Assembler::vfmadd231pd(c, a, b, vector_len);
1681 if (dst != c) {
1682 vmovdqu(dst, c);
1683 }
1684 }
1685
1686 // dst = c = a * b + c
1687 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
1688 Assembler::vfmadd231ps(c, a, b, vector_len);
1689 if (dst != c) {
1690 vmovdqu(dst, c);
1691 }
1692 }
1693
1694 // dst = c = a * b + c
1695 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
1696 Assembler::vfmadd231pd(c, a, b, vector_len);
1697 if (dst != c) {
1698 vmovdqu(dst, c);
1699 }
1700 }
1701
1702 // dst = c = a * b + c
1703 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
1704 Assembler::vfmadd231ps(c, a, b, vector_len);
1705 if (dst != c) {
1706 vmovdqu(dst, c);
1707 }
1708 }
1709
1710 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
1711 assert(rscratch != noreg || always_reachable(dst), "missing");
1712
1713 if (reachable(dst)) {
1714 incrementl(as_Address(dst));
1715 } else {
1716 lea(rscratch, dst);
1717 incrementl(Address(rscratch, 0));
1718 }
1719 }
1720
1721 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
1722 incrementl(as_Address(dst, rscratch));
1723 }
1724
1725 void MacroAssembler::incrementl(Register reg, int value) {
1726 if (value == min_jint) {addl(reg, value) ; return; }
1727 if (value < 0) { decrementl(reg, -value); return; }
1728 if (value == 0) { ; return; }
1729 if (value == 1 && UseIncDec) { incl(reg) ; return; }
1730 /* else */ { addl(reg, value) ; return; }
1731 }
1732
1733 void MacroAssembler::incrementl(Address dst, int value) {
1734 if (value == min_jint) {addl(dst, value) ; return; }
1735 if (value < 0) { decrementl(dst, -value); return; }
1736 if (value == 0) { ; return; }
1737 if (value == 1 && UseIncDec) { incl(dst) ; return; }
1738 /* else */ { addl(dst, value) ; return; }
1739 }
1740
1741 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
1742 assert(rscratch != noreg || always_reachable(dst), "missing");
1743 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
1744 if (reachable(dst)) {
1745 jmp_literal(dst.target(), dst.rspec());
1746 } else {
1747 lea(rscratch, dst);
1748 jmp(rscratch);
1749 }
1750 }
1751
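// jump_cc hand-encodes the conditional jump so it can choose between the
// 2-byte Jcc rel8 form and the 6-byte 0F 8x rel32 form. The displacement is
// relative to the end of the instruction, which is why short_size/long_size
// are subtracted from the raw pc-relative offset before emission.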
1752 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
1753 assert(rscratch != noreg || always_reachable(dst), "missing");
1754 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
1755 if (reachable(dst)) {
1756 InstructionMark im(this);
1757 relocate(dst.reloc());
1758 const int short_size = 2;
1759 const int long_size = 6;
1760 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
1761 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
1762 // 0111 tttn #8-bit disp
1763 emit_int8(0x70 | cc);
1764 emit_int8((offs - short_size) & 0xFF);
1765 } else {
1766 // 0000 1111 1000 tttn #32-bit disp
1767 emit_int8(0x0F);
1768 emit_int8((unsigned char)(0x80 | cc));
1769 emit_int32(offs - long_size);
1770 }
1771 } else {
1772 #ifdef ASSERT
1773 warning("reversing conditional branch");
1774 #endif /* ASSERT */
1775 Label skip;
1776 jccb(reverse[cc], skip);
1777 lea(rscratch, dst);
1778 Assembler::jmp(rscratch);
1779 bind(skip);
1780 }
1781 }
1782
1783 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
1784 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
1785 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
1786
1787 stmxcsr(mxcsr_save);
1788 movl(tmp, mxcsr_save);
1789 if (EnableX86ECoreOpts) {
1790 // The mxcsr_std has status bits set for performance on ECore
1791 orl(tmp, 0x003f);
1792 } else {
1793 // Mask out status bits (only check control and mask bits)
1794 andl(tmp, 0xFFC0);
1795 }
1796 cmp32(tmp, mxcsr_std, rscratch);
1797 }
1798
1799 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
1800 assert(rscratch != noreg || always_reachable(src), "missing");
1801
1802 if (reachable(src)) {
1803 Assembler::ldmxcsr(as_Address(src));
1804 } else {
1805 lea(rscratch, src);
1806 Assembler::ldmxcsr(Address(rscratch, 0));
1807 }
1808 }
1809
1810 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1811 int off = offset();
1812 movsbl(dst, src); // movsxb
1813 return off;
1814 }
1815
1816 // Note: load_signed_short used to be called load_signed_word.
1817 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
1818 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
1819 // The term "word" in HotSpot means a 32- or 64-bit machine word.
1820 int MacroAssembler::load_signed_short(Register dst, Address src) {
1821   // This is dubious: it seems safe to do a signed 16 => 64 bit version,
1822   // but this is what the 64-bit code has always done, which implies that
1823   // callers only rely on the low 32 bits.
1824 int off = offset();
1825 movswl(dst, src); // movsxw
1826 return off;
1827 }
1828
1829 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
1830   // See Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
1831   // and "3.9 Partial Register Penalties", p. 22.
1832 int off = offset();
1833 movzbl(dst, src); // movzxb
1834 return off;
1835 }
1836
1837 // Note: load_unsigned_short used to be called load_unsigned_word.
1838 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1839   // See Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
1840   // and "3.9 Partial Register Penalties", p. 22.
1841 int off = offset();
1842 movzwl(dst, src); // movzxw
1843 return off;
1844 }
1845
1846 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1847 switch (size_in_bytes) {
1848 case 8: movq(dst, src); break;
1849 case 4: movl(dst, src); break;
1850 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1851 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1852 default: ShouldNotReachHere();
1853 }
1854 }
1855
1856 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1857 switch (size_in_bytes) {
1858 case 8: movq(dst, src); break;
1859 case 4: movl(dst, src); break;
1860 case 2: movw(dst, src); break;
1861 case 1: movb(dst, src); break;
1862 default: ShouldNotReachHere();
1863 }
1864 }
1865
1866 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
1867 assert(rscratch != noreg || always_reachable(dst), "missing");
1868
1869 if (reachable(dst)) {
1870 movl(as_Address(dst), src);
1871 } else {
1872 lea(rscratch, dst);
1873 movl(Address(rscratch, 0), src);
1874 }
1875 }
1876
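// Note that this variant takes no scratch register: when the literal is not
// RIP-reachable, dst itself is used to form the address before the load
// overwrites it.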
1877 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
1878 if (reachable(src)) {
1879 movl(dst, as_Address(src));
1880 } else {
1881 lea(dst, src);
1882 movl(dst, Address(dst, 0));
1883 }
1884 }
1885
1886 // C++ bool manipulation
1887
1888 void MacroAssembler::movbool(Register dst, Address src) {
1889 if(sizeof(bool) == 1)
1890 movb(dst, src);
1891 else if(sizeof(bool) == 2)
1892 movw(dst, src);
1893 else if(sizeof(bool) == 4)
1894 movl(dst, src);
1895 else
1896 // unsupported
1897 ShouldNotReachHere();
1898 }
1899
1900 void MacroAssembler::movbool(Address dst, bool boolconst) {
1901 if(sizeof(bool) == 1)
1902 movb(dst, (int) boolconst);
1903 else if(sizeof(bool) == 2)
1904 movw(dst, (int) boolconst);
1905 else if(sizeof(bool) == 4)
1906 movl(dst, (int) boolconst);
1907 else
1908 // unsupported
1909 ShouldNotReachHere();
1910 }
1911
1912 void MacroAssembler::movbool(Address dst, Register src) {
1913 if(sizeof(bool) == 1)
1914 movb(dst, src);
1915 else if(sizeof(bool) == 2)
1916 movw(dst, src);
1917 else if(sizeof(bool) == 4)
1918 movl(dst, src);
1919 else
1920 // unsupported
1921 ShouldNotReachHere();
1922 }
1923
1924 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1925 assert(rscratch != noreg || always_reachable(src), "missing");
1926
1927 if (reachable(src)) {
1928 movdl(dst, as_Address(src));
1929 } else {
1930 lea(rscratch, src);
1931 movdl(dst, Address(rscratch, 0));
1932 }
1933 }
1934
1935 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
1936 assert(rscratch != noreg || always_reachable(src), "missing");
1937
1938 if (reachable(src)) {
1939 movq(dst, as_Address(src));
1940 } else {
1941 lea(rscratch, src);
1942 movq(dst, Address(rscratch, 0));
1943 }
1944 }
1945
1946 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
1947 assert(rscratch != noreg || always_reachable(src), "missing");
1948
1949 if (reachable(src)) {
1950 if (UseXmmLoadAndClearUpper) {
1951 movsd (dst, as_Address(src));
1952 } else {
1953 movlpd(dst, as_Address(src));
1954 }
1955 } else {
1956 lea(rscratch, src);
1957 if (UseXmmLoadAndClearUpper) {
1958 movsd (dst, Address(rscratch, 0));
1959 } else {
1960 movlpd(dst, Address(rscratch, 0));
1961 }
1962 }
1963 }
1964
1965 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
1966 assert(rscratch != noreg || always_reachable(src), "missing");
1967
1968 if (reachable(src)) {
1969 movss(dst, as_Address(src));
1970 } else {
1971 lea(rscratch, src);
1972 movss(dst, Address(rscratch, 0));
1973 }
1974 }
1975
1976 void MacroAssembler::movptr(Register dst, Register src) {
1977 movq(dst, src);
1978 }
1979
1980 void MacroAssembler::movptr(Register dst, Address src) {
1981 movq(dst, src);
1982 }
1983
1984 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
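// The immediate is materialized with the shortest encoding that preserves the
// value: movl zero-extends an unsigned 32-bit value into the full register,
// movq sign-extends a signed 32-bit immediate, and anything else needs the
// full 64-bit mov64.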
1985 void MacroAssembler::movptr(Register dst, intptr_t src) {
1986 if (is_uimm32(src)) {
1987 movl(dst, checked_cast<uint32_t>(src));
1988 } else if (is_simm32(src)) {
1989 movq(dst, checked_cast<int32_t>(src));
1990 } else {
1991 mov64(dst, src);
1992 }
1993 }
1994
1995 void MacroAssembler::movptr(Address dst, Register src) {
1996 movq(dst, src);
1997 }
1998
1999 void MacroAssembler::movptr(Address dst, int32_t src) {
2000 movslq(dst, src);
2001 }
2002
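// The movdqu/vmovdqu wrappers below assert that legacy (non-EVEX) encodings
// only ever see xmm0-xmm15: reaching xmm16-xmm31 requires an EVEX encoding,
// which in turn needs AVX512VL for the 128/256-bit forms.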
2003 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
2004 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2005 Assembler::movdqu(dst, src);
2006 }
2007
2008 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
2009 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2010 Assembler::movdqu(dst, src);
2011 }
2012
2013 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
2014 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2015 Assembler::movdqu(dst, src);
2016 }
2017
2018 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2019 assert(rscratch != noreg || always_reachable(src), "missing");
2020
2021 if (reachable(src)) {
2022 movdqu(dst, as_Address(src));
2023 } else {
2024 lea(rscratch, src);
2025 movdqu(dst, Address(rscratch, 0));
2026 }
2027 }
2028
2029 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
2030 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2031 Assembler::vmovdqu(dst, src);
2032 }
2033
2034 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
2035 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2036 Assembler::vmovdqu(dst, src);
2037 }
2038
2039 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
2040 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
2041 Assembler::vmovdqu(dst, src);
2042 }
2043
2044 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
2045 assert(rscratch != noreg || always_reachable(src), "missing");
2046
2047 if (reachable(src)) {
2048 vmovdqu(dst, as_Address(src));
2049 }
2050 else {
2051 lea(rscratch, src);
2052 vmovdqu(dst, Address(rscratch, 0));
2053 }
2054 }
2055
2056 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2057 assert(rscratch != noreg || always_reachable(src), "missing");
2058
2059 if (vector_len == AVX_512bit) {
2060 evmovdquq(dst, src, AVX_512bit, rscratch);
2061 } else if (vector_len == AVX_256bit) {
2062 vmovdqu(dst, src, rscratch);
2063 } else {
2064 movdqu(dst, src, rscratch);
2065 }
2066 }
2067
2068 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
2069 if (vector_len == AVX_512bit) {
2070 evmovdquq(dst, src, AVX_512bit);
2071 } else if (vector_len == AVX_256bit) {
2072 vmovdqu(dst, src);
2073 } else {
2074 movdqu(dst, src);
2075 }
2076 }
2077
2078 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
2079 if (vector_len == AVX_512bit) {
2080 evmovdquq(dst, src, AVX_512bit);
2081 } else if (vector_len == AVX_256bit) {
2082 vmovdqu(dst, src);
2083 } else {
2084 movdqu(dst, src);
2085 }
2086 }
2087
2088 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
2089 if (vector_len == AVX_512bit) {
2090 evmovdquq(dst, src, AVX_512bit);
2091 } else if (vector_len == AVX_256bit) {
2092 vmovdqu(dst, src);
2093 } else {
2094 movdqu(dst, src);
2095 }
2096 }
2097
2098 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2099 assert(rscratch != noreg || always_reachable(src), "missing");
2100
2101 if (reachable(src)) {
2102 vmovdqa(dst, as_Address(src));
2103 }
2104 else {
2105 lea(rscratch, src);
2106 vmovdqa(dst, Address(rscratch, 0));
2107 }
2108 }
2109
2110 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2111 assert(rscratch != noreg || always_reachable(src), "missing");
2112
2113 if (vector_len == AVX_512bit) {
2114 evmovdqaq(dst, src, AVX_512bit, rscratch);
2115 } else if (vector_len == AVX_256bit) {
2116 vmovdqa(dst, src, rscratch);
2117 } else {
2118 movdqa(dst, src, rscratch);
2119 }
2120 }
2121
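// Opmask moves: with AVX512BW the k-registers are 64 bits wide, so kmovq is
// used; on plain AVX-512 (EVEX without BW) only the low 16 bits are
// architecturally meaningful and kmovw suffices.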
2122 void MacroAssembler::kmov(KRegister dst, Address src) {
2123 if (VM_Version::supports_avx512bw()) {
2124 kmovql(dst, src);
2125 } else {
2126 assert(VM_Version::supports_evex(), "");
2127 kmovwl(dst, src);
2128 }
2129 }
2130
2131 void MacroAssembler::kmov(Address dst, KRegister src) {
2132 if (VM_Version::supports_avx512bw()) {
2133 kmovql(dst, src);
2134 } else {
2135 assert(VM_Version::supports_evex(), "");
2136 kmovwl(dst, src);
2137 }
2138 }
2139
2140 void MacroAssembler::kmov(KRegister dst, KRegister src) {
2141 if (VM_Version::supports_avx512bw()) {
2142 kmovql(dst, src);
2143 } else {
2144 assert(VM_Version::supports_evex(), "");
2145 kmovwl(dst, src);
2146 }
2147 }
2148
2149 void MacroAssembler::kmov(Register dst, KRegister src) {
2150 if (VM_Version::supports_avx512bw()) {
2151 kmovql(dst, src);
2152 } else {
2153 assert(VM_Version::supports_evex(), "");
2154 kmovwl(dst, src);
2155 }
2156 }
2157
2158 void MacroAssembler::kmov(KRegister dst, Register src) {
2159 if (VM_Version::supports_avx512bw()) {
2160 kmovql(dst, src);
2161 } else {
2162 assert(VM_Version::supports_evex(), "");
2163 kmovwl(dst, src);
2164 }
2165 }
2166
2167 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
2168 assert(rscratch != noreg || always_reachable(src), "missing");
2169
2170 if (reachable(src)) {
2171 kmovql(dst, as_Address(src));
2172 } else {
2173 lea(rscratch, src);
2174 kmovql(dst, Address(rscratch, 0));
2175 }
2176 }
2177
2178 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
2179 assert(rscratch != noreg || always_reachable(src), "missing");
2180
2181 if (reachable(src)) {
2182 kmovwl(dst, as_Address(src));
2183 } else {
2184 lea(rscratch, src);
2185 kmovwl(dst, Address(rscratch, 0));
2186 }
2187 }
2188
2189 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2190 int vector_len, Register rscratch) {
2191 assert(rscratch != noreg || always_reachable(src), "missing");
2192
2193 if (reachable(src)) {
2194 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
2195 } else {
2196 lea(rscratch, src);
2197 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
2198 }
2199 }
2200
2201 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
2202 int vector_len, Register rscratch) {
2203 assert(rscratch != noreg || always_reachable(src), "missing");
2204
2205 if (reachable(src)) {
2206 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
2207 } else {
2208 lea(rscratch, src);
2209 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
2210 }
2211 }
2212
2213 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2214 assert(rscratch != noreg || always_reachable(src), "missing");
2215
2216 if (reachable(src)) {
2217 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
2218 } else {
2219 lea(rscratch, src);
2220 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
2221 }
2222 }
2223
2224 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2225 assert(rscratch != noreg || always_reachable(src), "missing");
2226
2227 if (reachable(src)) {
2228 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
2229 } else {
2230 lea(rscratch, src);
2231 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
2232 }
2233 }
2234
2235 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2236 assert(rscratch != noreg || always_reachable(src), "missing");
2237
2238 if (reachable(src)) {
2239 Assembler::evmovdquq(dst, as_Address(src), vector_len);
2240 } else {
2241 lea(rscratch, src);
2242 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
2243 }
2244 }
2245
2246 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
2247 assert(rscratch != noreg || always_reachable(src), "missing");
2248
2249 if (reachable(src)) {
2250 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
2251 } else {
2252 lea(rscratch, src);
2253 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
2254 }
2255 }
2256
2257 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2258 assert(rscratch != noreg || always_reachable(src), "missing");
2259
2260 if (reachable(src)) {
2261 Assembler::evmovdqaq(dst, as_Address(src), vector_len);
2262 } else {
2263 lea(rscratch, src);
2264 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
2265 }
2266 }
2267
2268 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2269 assert(rscratch != noreg || always_reachable(src), "missing");
2270
2271 if (reachable(src)) {
2272 Assembler::movapd(dst, as_Address(src));
2273 } else {
2274 lea(rscratch, src);
2275 Assembler::movapd(dst, Address(rscratch, 0));
2276 }
2277 }
2278
2279 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
2280 assert(rscratch != noreg || always_reachable(src), "missing");
2281
2282 if (reachable(src)) {
2283 Assembler::movdqa(dst, as_Address(src));
2284 } else {
2285 lea(rscratch, src);
2286 Assembler::movdqa(dst, Address(rscratch, 0));
2287 }
2288 }
2289
2290 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2291 assert(rscratch != noreg || always_reachable(src), "missing");
2292
2293 if (reachable(src)) {
2294 Assembler::movsd(dst, as_Address(src));
2295 } else {
2296 lea(rscratch, src);
2297 Assembler::movsd(dst, Address(rscratch, 0));
2298 }
2299 }
2300
2301 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2302 assert(rscratch != noreg || always_reachable(src), "missing");
2303
2304 if (reachable(src)) {
2305 Assembler::movss(dst, as_Address(src));
2306 } else {
2307 lea(rscratch, src);
2308 Assembler::movss(dst, Address(rscratch, 0));
2309 }
2310 }
2311
2312 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
2313 assert(rscratch != noreg || always_reachable(src), "missing");
2314
2315 if (reachable(src)) {
2316 Assembler::movddup(dst, as_Address(src));
2317 } else {
2318 lea(rscratch, src);
2319 Assembler::movddup(dst, Address(rscratch, 0));
2320 }
2321 }
2322
2323 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2324 assert(rscratch != noreg || always_reachable(src), "missing");
2325
2326 if (reachable(src)) {
2327 Assembler::vmovddup(dst, as_Address(src), vector_len);
2328 } else {
2329 lea(rscratch, src);
2330 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
2331 }
2332 }
2333
2334 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2335 assert(rscratch != noreg || always_reachable(src), "missing");
2336
2337 if (reachable(src)) {
2338 Assembler::mulsd(dst, as_Address(src));
2339 } else {
2340 lea(rscratch, src);
2341 Assembler::mulsd(dst, Address(rscratch, 0));
2342 }
2343 }
2344
2345 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2346 assert(rscratch != noreg || always_reachable(src), "missing");
2347
2348 if (reachable(src)) {
2349 Assembler::mulss(dst, as_Address(src));
2350 } else {
2351 lea(rscratch, src);
2352 Assembler::mulss(dst, Address(rscratch, 0));
2353 }
2354 }
2355
2356 void MacroAssembler::null_check(Register reg, int offset) {
2357 if (needs_explicit_null_check(offset)) {
2358     // provoke an OS null-pointer exception if reg is null by
2359     // accessing M[reg] without changing any (non-CC) registers
2360     // NOTE: the compare is enough here to provoke a SEGV
2361 cmpptr(rax, Address(reg, 0));
2362 // Note: should probably use testl(rax, Address(reg, 0));
2363 // may be shorter code (however, this version of
2364 // testl needs to be implemented first)
2365 } else {
2366 // nothing to do, (later) access of M[reg + offset]
2367 // will provoke OS null exception if reg is null
2368 }
2369 }
2370
2371 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2372 andptr(markword, markWord::inline_type_mask_in_place);
2373 cmpptr(markword, markWord::inline_type_pattern);
2374 jcc(Assembler::equal, is_inline_type);
2375 }
2376
2377 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
2378 if (can_be_null) {
2379 testptr(object, object);
2380 jcc(Assembler::zero, not_inline_type);
2381 }
2382 const int is_inline_type_mask = markWord::inline_type_pattern;
2383 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2384 andptr(tmp, is_inline_type_mask);
2385 cmpptr(tmp, is_inline_type_mask);
2386 jcc(Assembler::notEqual, not_inline_type);
2387 }
2388
2389 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2390 movl(temp_reg, flags);
2391 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2392 jcc(Assembler::notEqual, is_null_free_inline_type);
2393 }
2394
2395 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2396 movl(temp_reg, flags);
2397 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
2398 jcc(Assembler::equal, not_null_free_inline_type);
2399 }
2400
2401 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2402 movl(temp_reg, flags);
2403 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
2404 jcc(Assembler::notEqual, is_flat);
2405 }
2406
2407 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2408 movl(temp_reg, flags);
2409 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
2410 jcc(Assembler::notEqual, has_null_marker);
2411 }
2412
2413 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2414 Label test_mark_word;
2415 // load mark word
2416 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
2417 // check displaced
2418 testl(temp_reg, markWord::unlocked_value);
2419 jccb(Assembler::notZero, test_mark_word);
2420 // slow path use klass prototype
2421 push(rscratch1);
2422 load_prototype_header(temp_reg, oop, rscratch1);
2423 pop(rscratch1);
2424
2425 bind(test_mark_word);
2426 testl(temp_reg, test_bit);
2427 jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
2428 }
2429
2430 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
2431 Label& is_flat_array) {
2432 #ifdef _LP64
2433 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2434 #else
2435 load_klass(temp_reg, oop, noreg);
2436 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2437 test_flat_array_layout(temp_reg, is_flat_array);
2438 #endif
2439 }
2440
2441 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
2442 Label& is_non_flat_array) {
2443 #ifdef _LP64
2444 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2445 #else
2446 load_klass(temp_reg, oop, noreg);
2447 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
2448 test_non_flat_array_layout(temp_reg, is_non_flat_array);
2449 #endif
2450 }
2451
2452 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
2453 #ifdef _LP64
2454 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2455 #else
2456 Unimplemented();
2457 #endif
2458 }
2459
2460 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
2461 #ifdef _LP64
2462 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2463 #else
2464 Unimplemented();
2465 #endif
2466 }
2467
2468 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2469 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2470 jcc(Assembler::notZero, is_flat_array);
2471 }
2472
2473 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2474 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2475 jcc(Assembler::zero, is_non_flat_array);
2476 }
2477
2478 void MacroAssembler::os_breakpoint() {
2479   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
2480   // (e.g., MSVC can't call ps() otherwise)
2481 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
2482 }
2483
2484 void MacroAssembler::unimplemented(const char* what) {
2485 const char* buf = nullptr;
2486 {
2487 ResourceMark rm;
2488 stringStream ss;
2489 ss.print("unimplemented: %s", what);
2490 buf = code_string(ss.as_string());
2491 }
2492 stop(buf);
2493 }
2494
2495 #define XSTATE_BV 0x200
2496
2497 void MacroAssembler::pop_CPU_state() {
2498 pop_FPU_state();
2499 pop_IU_state();
2500 }
2501
2502 void MacroAssembler::pop_FPU_state() {
2503 fxrstor(Address(rsp, 0));
2504 addptr(rsp, FPUStateSizeInWords * wordSize);
2505 }
2506
2507 void MacroAssembler::pop_IU_state() {
2508 popa();
2509 addq(rsp, 8);
2510 popf();
2511 }
2512
2513 // Save Integer and Float state
2514 // Warning: the stack must be 16-byte aligned (64-bit)
2515 void MacroAssembler::push_CPU_state() {
2516 push_IU_state();
2517 push_FPU_state();
2518 }
2519
2520 void MacroAssembler::push_FPU_state() {
2521 subptr(rsp, FPUStateSizeInWords * wordSize);
2522 fxsave(Address(rsp, 0));
2523 }
2524
2525 void MacroAssembler::push_IU_state() {
2526 // Push flags first because pusha kills them
2527 pushf();
2528 // Make sure rsp stays 16-byte aligned
2529 subq(rsp, 8);
2530 pusha();
2531 }
2532
2533 void MacroAssembler::push_cont_fastpath() {
2534 if (!Continuations::enabled()) return;
2535
2536 Label L_done;
2537 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2538 jccb(Assembler::belowEqual, L_done);
2539 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
2540 bind(L_done);
2541 }
2542
2543 void MacroAssembler::pop_cont_fastpath() {
2544 if (!Continuations::enabled()) return;
2545
2546 Label L_done;
2547 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
2548 jccb(Assembler::below, L_done);
2549 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
2550 bind(L_done);
2551 }
2552
2553 #ifdef ASSERT
2554 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
2555 Label no_cont;
2556 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
2557 testl(cont, cont);
2558 jcc(Assembler::zero, no_cont);
2559 stop(name);
2560 bind(no_cont);
2561 }
2562 #endif
2563
2564 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
2565 // we must set sp to zero to clear frame
2566 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
2567 // must clear fp, so that compiled frames are not confused; it is
2568 // possible that we need it only for debugging
2569 if (clear_fp) {
2570 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
2571 }
2572 // Always clear the pc because it could have been set by make_walkable()
2573 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
2574 vzeroupper();
2575 }
2576
2577 void MacroAssembler::round_to(Register reg, int modulus) {
2578 addptr(reg, modulus - 1);
2579 andptr(reg, -modulus);
2580 }
2581
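// safepoint_poll: the per-thread polling word doubles as a stack watermark.
// At method return the stack pointer (rsp in compiled code, rbp in the
// interpreter) is compared against it and the slow path is taken when it
// compares above; elsewhere only the poll bit is tested, which is set when a
// safepoint or handshake is pending.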
2582 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
2583 if (at_return) {
2584 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
2585 // we may safely use rsp instead to perform the stack watermark check.
2586 cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
2587 jcc(Assembler::above, slow_path);
2588 return;
2589 }
2590 testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2591 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
2592 }
2593
2594 // Calls to C land
2595 //
2596 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
2597 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
2598 // has to be reset to 0. This is required to allow proper stack traversal.
2599 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2600 Register last_java_fp,
2601 address last_java_pc,
2602 Register rscratch) {
2603 vzeroupper();
2604 // determine last_java_sp register
2605 if (!last_java_sp->is_valid()) {
2606 last_java_sp = rsp;
2607 }
2608 // last_java_fp is optional
2609 if (last_java_fp->is_valid()) {
2610 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
2611 }
2612 // last_java_pc is optional
2613 if (last_java_pc != nullptr) {
2614 Address java_pc(r15_thread,
2615 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
2616 lea(java_pc, InternalAddress(last_java_pc), rscratch);
2617 }
2618 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
2619 }
2620
2621 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
2622 Register last_java_fp,
2623 Label &L,
2624 Register scratch) {
2625 lea(scratch, L);
2626 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
2627 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
2628 }
2629
2630 void MacroAssembler::shlptr(Register dst, int imm8) {
2631 shlq(dst, imm8);
2632 }
2633
2634 void MacroAssembler::shrptr(Register dst, int imm8) {
2635 shrq(dst, imm8);
2636 }
2637
2638 void MacroAssembler::sign_extend_byte(Register reg) {
2639 movsbl(reg, reg); // movsxb
2640 }
2641
2642 void MacroAssembler::sign_extend_short(Register reg) {
2643 movswl(reg, reg); // movsxw
2644 }
2645
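// testl with a small non-negative immediate (0..127 after the is8bit check)
// only involves bits in the lowest byte, so the shorter testb encoding sets
// the flags identically; the register form additionally requires a
// byte-addressable register.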
2646 void MacroAssembler::testl(Address dst, int32_t imm32) {
2647 if (imm32 >= 0 && is8bit(imm32)) {
2648 testb(dst, imm32);
2649 } else {
2650 Assembler::testl(dst, imm32);
2651 }
2652 }
2653
2654 void MacroAssembler::testl(Register dst, int32_t imm32) {
2655 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
2656 testb(dst, imm32);
2657 } else {
2658 Assembler::testl(dst, imm32);
2659 }
2660 }
2661
2662 void MacroAssembler::testl(Register dst, AddressLiteral src) {
2663 assert(always_reachable(src), "Address should be reachable");
2664 testl(dst, as_Address(src));
2665 }
2666
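// For testq with a non-negative imm32 the sign-extended 64-bit mask has a
// zero upper half, so the 64-bit and 32-bit tests produce the same flags and
// the shorter testl (which may shrink further to testb above) is emitted.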
2667 void MacroAssembler::testq(Address dst, int32_t imm32) {
2668 if (imm32 >= 0) {
2669 testl(dst, imm32);
2670 } else {
2671 Assembler::testq(dst, imm32);
2672 }
2673 }
2674
2675 void MacroAssembler::testq(Register dst, int32_t imm32) {
2676 if (imm32 >= 0) {
2677 testl(dst, imm32);
2678 } else {
2679 Assembler::testq(dst, imm32);
2680 }
2681 }
2682
2683 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
2684 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2685 Assembler::pcmpeqb(dst, src);
2686 }
2687
2688 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
2689 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2690 Assembler::pcmpeqw(dst, src);
2691 }
2692
2693 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
2694 assert((dst->encoding() < 16),"XMM register should be 0-15");
2695 Assembler::pcmpestri(dst, src, imm8);
2696 }
2697
2698 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
2699 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2700 Assembler::pcmpestri(dst, src, imm8);
2701 }
2702
2703 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2704 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2705 Assembler::pmovzxbw(dst, src);
2706 }
2707
2708 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
2709 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2710 Assembler::pmovzxbw(dst, src);
2711 }
2712
2713 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
2714 assert((src->encoding() < 16),"XMM register should be 0-15");
2715 Assembler::pmovmskb(dst, src);
2716 }
2717
2718 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
2719 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
2720 Assembler::ptest(dst, src);
2721 }
2722
2723 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2724 assert(rscratch != noreg || always_reachable(src), "missing");
2725
2726 if (reachable(src)) {
2727 Assembler::sqrtss(dst, as_Address(src));
2728 } else {
2729 lea(rscratch, src);
2730 Assembler::sqrtss(dst, Address(rscratch, 0));
2731 }
2732 }
2733
2734 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2735 assert(rscratch != noreg || always_reachable(src), "missing");
2736
2737 if (reachable(src)) {
2738 Assembler::subsd(dst, as_Address(src));
2739 } else {
2740 lea(rscratch, src);
2741 Assembler::subsd(dst, Address(rscratch, 0));
2742 }
2743 }
2744
2745 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
2746 assert(rscratch != noreg || always_reachable(src), "missing");
2747
2748 if (reachable(src)) {
2749 Assembler::roundsd(dst, as_Address(src), rmode);
2750 } else {
2751 lea(rscratch, src);
2752 Assembler::roundsd(dst, Address(rscratch, 0), rmode);
2753 }
2754 }
2755
2756 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2757 assert(rscratch != noreg || always_reachable(src), "missing");
2758
2759 if (reachable(src)) {
2760 Assembler::subss(dst, as_Address(src));
2761 } else {
2762 lea(rscratch, src);
2763 Assembler::subss(dst, Address(rscratch, 0));
2764 }
2765 }
2766
2767 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2768 assert(rscratch != noreg || always_reachable(src), "missing");
2769
2770 if (reachable(src)) {
2771 Assembler::ucomisd(dst, as_Address(src));
2772 } else {
2773 lea(rscratch, src);
2774 Assembler::ucomisd(dst, Address(rscratch, 0));
2775 }
2776 }
2777
2778 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
2779 assert(rscratch != noreg || always_reachable(src), "missing");
2780
2781 if (reachable(src)) {
2782 Assembler::ucomiss(dst, as_Address(src));
2783 } else {
2784 lea(rscratch, src);
2785 Assembler::ucomiss(dst, Address(rscratch, 0));
2786 }
2787 }
2788
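// xorpd/xorps on xmm16-xmm31 need an EVEX encoding, which for these opcodes
// requires AVX512DQ (plus AVX512VL below 512 bits). When that support is
// missing, the bitwise-equivalent integer vpxor at 512-bit width is used
// instead.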
2789 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
2790 assert(rscratch != noreg || always_reachable(src), "missing");
2791
2792 // Used in sign-bit flipping with aligned address.
2793 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2794
2795 if (UseAVX > 2 &&
2796 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2797 (dst->encoding() >= 16)) {
2798 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2799 } else if (reachable(src)) {
2800 Assembler::xorpd(dst, as_Address(src));
2801 } else {
2802 lea(rscratch, src);
2803 Assembler::xorpd(dst, Address(rscratch, 0));
2804 }
2805 }
2806
2807 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
2808 if (UseAVX > 2 &&
2809 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2810 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2811 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2812 } else {
2813 Assembler::xorpd(dst, src);
2814 }
2815 }
2816
2817 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
2818 if (UseAVX > 2 &&
2819 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2820 ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
2821 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
2822 } else {
2823 Assembler::xorps(dst, src);
2824 }
2825 }
2826
2827 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
2828 assert(rscratch != noreg || always_reachable(src), "missing");
2829
2830 // Used in sign-bit flipping with aligned address.
2831 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
2832
2833 if (UseAVX > 2 &&
2834 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
2835 (dst->encoding() >= 16)) {
2836 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
2837 } else if (reachable(src)) {
2838 Assembler::xorps(dst, as_Address(src));
2839 } else {
2840 lea(rscratch, src);
2841 Assembler::xorps(dst, Address(rscratch, 0));
2842 }
2843 }
2844
2845 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
2846 assert(rscratch != noreg || always_reachable(src), "missing");
2847
2848 // Used in sign-bit flipping with aligned address.
2849 bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
2850 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
2851 if (reachable(src)) {
2852 Assembler::pshufb(dst, as_Address(src));
2853 } else {
2854 lea(rscratch, src);
2855 Assembler::pshufb(dst, Address(rscratch, 0));
2856 }
2857 }
2858
2859 // AVX 3-operands instructions
2860
2861 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2862 assert(rscratch != noreg || always_reachable(src), "missing");
2863
2864 if (reachable(src)) {
2865 vaddsd(dst, nds, as_Address(src));
2866 } else {
2867 lea(rscratch, src);
2868 vaddsd(dst, nds, Address(rscratch, 0));
2869 }
2870 }
2871
2872 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
2873 assert(rscratch != noreg || always_reachable(src), "missing");
2874
2875 if (reachable(src)) {
2876 vaddss(dst, nds, as_Address(src));
2877 } else {
2878 lea(rscratch, src);
2879 vaddss(dst, nds, Address(rscratch, 0));
2880 }
2881 }
2882
2883 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2884 assert(UseAVX > 0, "requires some form of AVX");
2885 assert(rscratch != noreg || always_reachable(src), "missing");
2886
2887 if (reachable(src)) {
2888 Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
2889 } else {
2890 lea(rscratch, src);
2891 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
2892 }
2893 }
2894
2895 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2896 assert(UseAVX > 0, "requires some form of AVX");
2897 assert(rscratch != noreg || always_reachable(src), "missing");
2898
2899 if (reachable(src)) {
2900 Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
2901 } else {
2902 lea(rscratch, src);
2903 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
2904 }
2905 }
2906
2907 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2908 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2909 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2910
2911 vandps(dst, nds, negate_field, vector_len, rscratch);
2912 }
2913
2914 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
2915 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
2916 assert(rscratch != noreg || always_reachable(negate_field), "missing");
2917
2918 vandpd(dst, nds, negate_field, vector_len, rscratch);
2919 }
2920
2921 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
2922 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2923 Assembler::vpaddb(dst, nds, src, vector_len);
2924 }
2925
2926 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
2927 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2928 Assembler::vpaddb(dst, nds, src, vector_len);
2929 }
2930
2931 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
2932 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2933 Assembler::vpaddw(dst, nds, src, vector_len);
2934 }
2935
2936 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
2937 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
2938 Assembler::vpaddw(dst, nds, src, vector_len);
2939 }
2940
2941 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
2942 assert(rscratch != noreg || always_reachable(src), "missing");
2943
2944 if (reachable(src)) {
2945 Assembler::vpand(dst, nds, as_Address(src), vector_len);
2946 } else {
2947 lea(rscratch, src);
2948 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
2949 }
2950 }
2951
2952 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2953 assert(rscratch != noreg || always_reachable(src), "missing");
2954
2955 if (reachable(src)) {
2956 Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
2957 } else {
2958 lea(rscratch, src);
2959 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
2960 }
2961 }
2962
2963 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2964 assert(rscratch != noreg || always_reachable(src), "missing");
2965
2966 if (reachable(src)) {
2967 Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
2968 } else {
2969 lea(rscratch, src);
2970 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
2971 }
2972 }
2973
2974 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2975 assert(rscratch != noreg || always_reachable(src), "missing");
2976
2977 if (reachable(src)) {
2978 Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
2979 } else {
2980 lea(rscratch, src);
2981 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
2982 }
2983 }
2984
2985 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2986 assert(rscratch != noreg || always_reachable(src), "missing");
2987
2988 if (reachable(src)) {
2989 Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
2990 } else {
2991 lea(rscratch, src);
2992 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
2993 }
2994 }
2995
2996 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
2997 assert(rscratch != noreg || always_reachable(src), "missing");
2998
2999 if (reachable(src)) {
3000 Assembler::vbroadcastss(dst, as_Address(src), vector_len);
3001 } else {
3002 lea(rscratch, src);
3003 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
3004 }
3005 }
3006
3007 // Vector float blend
3008 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
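// With EnableX86ECoreOpts the blend is emulated with and/andn/or when a
// usable scratch register is available: vpsrad by 32 (shift counts above 31
// replicate the sign bit) turns each element's sign bit into an
// all-ones/all-zeros lane mask, and then dst = (mask & src2) | (~mask & src1),
// matching blendv's sign-bit select.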
3009 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3010 // WARN: Allow dst == (src1|src2), mask == scratch
3011 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3012 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3013 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
3014 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3015 if (blend_emulation && scratch_available && dst_available) {
3016 if (compute_mask) {
3017 vpsrad(scratch, mask, 32, vector_len);
3018 mask = scratch;
3019 }
3020 if (dst == src1) {
3021 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3022 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3023 } else {
3024 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3025 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3026 }
3027 vpor(dst, dst, scratch, vector_len);
3028 } else {
3029 Assembler::vblendvps(dst, src1, src2, mask, vector_len);
3030 }
3031 }
3032
3033 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3034 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3035 // WARN: Allow dst == (src1|src2), mask == scratch
3036 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
3037 !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
3038 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
3039 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3040 if (blend_emulation && scratch_available && dst_available) {
3041 if (compute_mask) {
3042 vpxor(scratch, scratch, scratch, vector_len);
3043 vpcmpgtq(scratch, scratch, mask, vector_len);
3044 mask = scratch;
3045 }
3046 if (dst == src1) {
3047       vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3048 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3049 } else {
3050 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3051       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3052 }
3053 vpor(dst, dst, scratch, vector_len);
3054 } else {
3055 Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
3056 }
3057 }
3058
3059 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3060 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3061 Assembler::vpcmpeqb(dst, nds, src, vector_len);
3062 }
3063
3064 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
3065 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3066 Assembler::vpcmpeqb(dst, src1, src2, vector_len);
3067 }
3068
3069 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3070 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3071 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3072 }
3073
3074 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3075 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3076 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3077 }
3078
3079 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3080 assert(rscratch != noreg || always_reachable(src), "missing");
3081
3082 if (reachable(src)) {
3083 Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
3084 } else {
3085 lea(rscratch, src);
3086 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
3087 }
3088 }
3089
3090 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3091 int comparison, bool is_signed, int vector_len, Register rscratch) {
3092 assert(rscratch != noreg || always_reachable(src), "missing");
3093
3094 if (reachable(src)) {
3095 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3096 } else {
3097 lea(rscratch, src);
3098 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3099 }
3100 }
3101
3102 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3103 int comparison, bool is_signed, int vector_len, Register rscratch) {
3104 assert(rscratch != noreg || always_reachable(src), "missing");
3105
3106 if (reachable(src)) {
3107 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3108 } else {
3109 lea(rscratch, src);
3110 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3111 }
3112 }
3113
3114 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3115 int comparison, bool is_signed, int vector_len, Register rscratch) {
3116 assert(rscratch != noreg || always_reachable(src), "missing");
3117
3118 if (reachable(src)) {
3119 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3120 } else {
3121 lea(rscratch, src);
3122 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3123 }
3124 }
3125
3126 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
3127 int comparison, bool is_signed, int vector_len, Register rscratch) {
3128 assert(rscratch != noreg || always_reachable(src), "missing");
3129
3130 if (reachable(src)) {
3131 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
3132 } else {
3133 lea(rscratch, src);
3134 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
3135 }
3136 }
3137
3138 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
3139 if (width == Assembler::Q) {
3140 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
3141 } else {
3142 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
3143 }
3144 }
3145
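// vpcmpCCW synthesizes all comparison predicates from the two primitives the
// ISA provides: equality (opcode 0x29 = pcmpeqq, or 0x74 + width for b/w/d)
// and signed greater-than (0x37 = pcmpgtq, or 0x64 + width). neq/le/nlt are
// built by swapping operands and/or inverting the result with an all-ones xor.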
3146 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
3147 int eq_cond_enc = 0x29;
3148 int gt_cond_enc = 0x37;
3149 if (width != Assembler::Q) {
3150 eq_cond_enc = 0x74 + width;
3151 gt_cond_enc = 0x64 + width;
3152 }
3153 switch (cond) {
3154 case eq:
3155 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3156 break;
3157 case neq:
3158 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
3159 vallones(xtmp, vector_len);
3160 vpxor(dst, xtmp, dst, vector_len);
3161 break;
3162 case le:
3163 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3164 vallones(xtmp, vector_len);
3165 vpxor(dst, xtmp, dst, vector_len);
3166 break;
3167 case nlt:
3168 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3169 vallones(xtmp, vector_len);
3170 vpxor(dst, xtmp, dst, vector_len);
3171 break;
3172 case lt:
3173 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
3174 break;
3175 case nle:
3176 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
3177 break;
3178 default:
3179 assert(false, "Should not reach here");
3180 }
3181 }
3182
3183 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
3184 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3185 Assembler::vpmovzxbw(dst, src, vector_len);
3186 }
3187
3188 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
3189 assert((src->encoding() < 16),"XMM register should be 0-15");
3190 Assembler::vpmovmskb(dst, src, vector_len);
3191 }
3192
3193 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3194 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3195 Assembler::vpmullw(dst, nds, src, vector_len);
3196 }
3197
3198 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3199 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3200 Assembler::vpmullw(dst, nds, src, vector_len);
3201 }
3202
3203 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3204 assert((UseAVX > 0), "AVX support is needed");
3205 assert(rscratch != noreg || always_reachable(src), "missing");
3206
3207 if (reachable(src)) {
3208 Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
3209 } else {
3210 lea(rscratch, src);
3211 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
3212 }
3213 }
3214
3215 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3216 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3217 Assembler::vpsubb(dst, nds, src, vector_len);
3218 }
3219
3220 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3221 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3222 Assembler::vpsubb(dst, nds, src, vector_len);
3223 }
3224
3225 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3226 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3227 Assembler::vpsubw(dst, nds, src, vector_len);
3228 }
3229
3230 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3231 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3232 Assembler::vpsubw(dst, nds, src, vector_len);
3233 }
3234
3235 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3236 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3237 Assembler::vpsraw(dst, nds, shift, vector_len);
3238 }
3239
3240 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3241 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3242 Assembler::vpsraw(dst, nds, shift, vector_len);
3243 }
3244
3245 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "AVX-512 support is needed");
3247 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3248 vector_len = 2;
3249 }
3250 Assembler::evpsraq(dst, nds, shift, vector_len);
3251 }
3252
3253 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(UseAVX > 2, "AVX-512 support is needed");
3255 if (!VM_Version::supports_avx512vl() && vector_len < 2) {
3256 vector_len = 2;
3257 }
3258 Assembler::evpsraq(dst, nds, shift, vector_len);
3259 }
3260
3261 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3262 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3263 Assembler::vpsrlw(dst, nds, shift, vector_len);
3264 }
3265
3266 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3267 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3268 Assembler::vpsrlw(dst, nds, shift, vector_len);
3269 }
3270
3271 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
3272 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3273 Assembler::vpsllw(dst, nds, shift, vector_len);
3274 }
3275
3276 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
3277 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3278 Assembler::vpsllw(dst, nds, shift, vector_len);
3279 }
3280
3281 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
3282 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
3283 Assembler::vptest(dst, src);
3284 }
3285
3286 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
3287 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3288 Assembler::punpcklbw(dst, src);
3289 }
3290
3291 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
3292 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
3293 Assembler::pshufd(dst, src, mode);
3294 }
3295
3296 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
3297 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3298 Assembler::pshuflw(dst, src, mode);
3299 }
3300
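// For AddressLiteral operands the pattern below is used throughout: if the
// literal is reachable from the code (RIP-relative), it is used directly;
// otherwise its address is first materialized into rscratch. The assert checks
// that a scratch register was supplied whenever one could be needed.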
3301 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3302 assert(rscratch != noreg || always_reachable(src), "missing");
3303
3304 if (reachable(src)) {
3305 vandpd(dst, nds, as_Address(src), vector_len);
3306 } else {
3307 lea(rscratch, src);
3308 vandpd(dst, nds, Address(rscratch, 0), vector_len);
3309 }
3310 }
3311
3312 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3313 assert(rscratch != noreg || always_reachable(src), "missing");
3314
3315 if (reachable(src)) {
3316 vandps(dst, nds, as_Address(src), vector_len);
3317 } else {
3318 lea(rscratch, src);
3319 vandps(dst, nds, Address(rscratch, 0), vector_len);
3320 }
3321 }
3322
3323 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
3324 bool merge, int vector_len, Register rscratch) {
3325 assert(rscratch != noreg || always_reachable(src), "missing");
3326
3327 if (reachable(src)) {
3328 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
3329 } else {
3330 lea(rscratch, src);
3331 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
3332 }
3333 }
3334
3335 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3336 assert(rscratch != noreg || always_reachable(src), "missing");
3337
3338 if (reachable(src)) {
3339 vdivsd(dst, nds, as_Address(src));
3340 } else {
3341 lea(rscratch, src);
3342 vdivsd(dst, nds, Address(rscratch, 0));
3343 }
3344 }
3345
3346 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3347 assert(rscratch != noreg || always_reachable(src), "missing");
3348
3349 if (reachable(src)) {
3350 vdivss(dst, nds, as_Address(src));
3351 } else {
3352 lea(rscratch, src);
3353 vdivss(dst, nds, Address(rscratch, 0));
3354 }
3355 }
3356
3357 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3358 assert(rscratch != noreg || always_reachable(src), "missing");
3359
3360 if (reachable(src)) {
3361 vmulsd(dst, nds, as_Address(src));
3362 } else {
3363 lea(rscratch, src);
3364 vmulsd(dst, nds, Address(rscratch, 0));
3365 }
3366 }
3367
3368 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3369 assert(rscratch != noreg || always_reachable(src), "missing");
3370
3371 if (reachable(src)) {
3372 vmulss(dst, nds, as_Address(src));
3373 } else {
3374 lea(rscratch, src);
3375 vmulss(dst, nds, Address(rscratch, 0));
3376 }
3377 }
3378
3379 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3380 assert(rscratch != noreg || always_reachable(src), "missing");
3381
3382 if (reachable(src)) {
3383 vsubsd(dst, nds, as_Address(src));
3384 } else {
3385 lea(rscratch, src);
3386 vsubsd(dst, nds, Address(rscratch, 0));
3387 }
3388 }
3389
3390 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3391 assert(rscratch != noreg || always_reachable(src), "missing");
3392
3393 if (reachable(src)) {
3394 vsubss(dst, nds, as_Address(src));
3395 } else {
3396 lea(rscratch, src);
3397 vsubss(dst, nds, Address(rscratch, 0));
3398 }
3399 }
3400
3401 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3402 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3403 assert(rscratch != noreg || always_reachable(src), "missing");
3404
3405 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
3406 }
3407
3408 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
3409 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
3410 assert(rscratch != noreg || always_reachable(src), "missing");
3411
3412 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
3413 }
3414
3415 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3416 assert(rscratch != noreg || always_reachable(src), "missing");
3417
3418 if (reachable(src)) {
3419 vxorpd(dst, nds, as_Address(src), vector_len);
3420 } else {
3421 lea(rscratch, src);
3422 vxorpd(dst, nds, Address(rscratch, 0), vector_len);
3423 }
3424 }
3425
3426 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3427 assert(rscratch != noreg || always_reachable(src), "missing");
3428
3429 if (reachable(src)) {
3430 vxorps(dst, nds, as_Address(src), vector_len);
3431 } else {
3432 lea(rscratch, src);
3433 vxorps(dst, nds, Address(rscratch, 0), vector_len);
3434 }
3435 }
3436
3437 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3438 assert(rscratch != noreg || always_reachable(src), "missing");
3439
3440 if (UseAVX > 1 || (vector_len < 1)) {
3441 if (reachable(src)) {
3442 Assembler::vpxor(dst, nds, as_Address(src), vector_len);
3443 } else {
3444 lea(rscratch, src);
3445 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
3446 }
3447 } else {
3448 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
3449 }
3450 }
3451
3452 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3453 assert(rscratch != noreg || always_reachable(src), "missing");
3454
3455 if (reachable(src)) {
3456 Assembler::vpermd(dst, nds, as_Address(src), vector_len);
3457 } else {
3458 lea(rscratch, src);
3459 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
3460 }
3461 }
3462
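// Strip the JNI handle tag bits (JNIHandles::tag_mask) from a possibly tagged
// jobject, leaving the raw handle address in the register.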
3463 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
3464 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
3465 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
3466 // The inverted mask is sign-extended
3467 andptr(possibly_non_local, inverted_mask);
3468 }
3469
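// Resolve a jobject in place into the oop it refers to:
//   value == nullptr        -> left as-is
//   untagged (local) handle -> raw load (AS_RAW)
//   global handle           -> load after subtracting TypeTag::global
//   weak global handle      -> load with ON_PHANTOM_OOP_REF after subtracting TypeTag::weak_global
// The GC-aware loads go through access_load_at so the active barrier set is applied.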
3470 void MacroAssembler::resolve_jobject(Register value,
3471 Register tmp) {
3472 Register thread = r15_thread;
3473 assert_different_registers(value, thread, tmp);
3474 Label done, tagged, weak_tagged;
3475 testptr(value, value);
3476 jcc(Assembler::zero, done); // Use null as-is.
3477 testptr(value, JNIHandles::tag_mask); // Test for tag.
3478 jcc(Assembler::notZero, tagged);
3479
3480 // Resolve local handle
3481 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
3482 verify_oop(value);
3483 jmp(done);
3484
3485 bind(tagged);
3486 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
3487 jcc(Assembler::notZero, weak_tagged);
3488
3489 // Resolve global handle
3490 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3491 verify_oop(value);
3492 jmp(done);
3493
3494 bind(weak_tagged);
3495 // Resolve jweak.
3496 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3497 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
3498 verify_oop(value);
3499
3500 bind(done);
3501 }
3502
3503 void MacroAssembler::resolve_global_jobject(Register value,
3504 Register tmp) {
3505 Register thread = r15_thread;
3506 assert_different_registers(value, thread, tmp);
3507 Label done;
3508
3509 testptr(value, value);
3510 jcc(Assembler::zero, done); // Use null as-is.
3511
3512 #ifdef ASSERT
3513 {
3514 Label valid_global_tag;
3515 testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
3516 jcc(Assembler::notZero, valid_global_tag);
3517 stop("non global jobject using resolve_global_jobject");
3518 bind(valid_global_tag);
3519 }
3520 #endif
3521
3522 // Resolve global handle
3523 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
3524 verify_oop(value);
3525
3526 bind(done);
3527 }
3528
3529 void MacroAssembler::subptr(Register dst, int32_t imm32) {
3530 subq(dst, imm32);
3531 }
3532
// Force generation of a 4-byte immediate value even if it fits into 8 bits
3534 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
3535 subq_imm32(dst, imm32);
3536 }
3537
3538 void MacroAssembler::subptr(Register dst, Register src) {
3539 subq(dst, src);
3540 }
3541
3542 // C++ bool manipulation
3543 void MacroAssembler::testbool(Register dst) {
3544 if(sizeof(bool) == 1)
3545 testb(dst, 0xff);
3546 else if(sizeof(bool) == 2) {
3547 // testw implementation needed for two byte bools
3548 ShouldNotReachHere();
3549 } else if(sizeof(bool) == 4)
3550 testl(dst, dst);
3551 else
3552 // unsupported
3553 ShouldNotReachHere();
3554 }
3555
3556 void MacroAssembler::testptr(Register dst, Register src) {
3557 testq(dst, src);
3558 }
3559
3560 // Object / value buffer allocation...
3561 //
3562 // Kills klass and rsi on LP64
3563 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
3564 Register t1, Register t2,
3565 bool clear_fields, Label& alloc_failed)
3566 {
3567 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
3568 Register layout_size = t1;
3569 assert(new_obj == rax, "needs to be rax");
3570 assert_different_registers(klass, new_obj, t1, t2);
3571
3572 // get instance_size in InstanceKlass (scaled to a count of bytes)
3573 movl(layout_size, Address(klass, Klass::layout_helper_offset()));
3574 // test to see if it is malformed in some way
3575 testl(layout_size, Klass::_lh_instance_slow_path_bit);
3576 jcc(Assembler::notZero, slow_case_no_pop);
3577
3578 // Allocate the instance:
3579 // If TLAB is enabled:
3580 // Try to allocate in the TLAB.
3581 // If fails, go to the slow path.
3582 // Else If inline contiguous allocations are enabled:
3583 // Try to allocate in eden.
3584 // If fails due to heap end, go to slow path.
3585 //
3586 // If TLAB is enabled OR inline contiguous is enabled:
3587 // Initialize the allocation.
3588 // Exit.
3589 //
3590 // Go to slow path.
3591
3592 push(klass);
3593 if (UseTLAB) {
3594 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
3595 if (ZeroTLAB || (!clear_fields)) {
      // the fields have already been cleared (ZeroTLAB), or the caller does not want them cleared
3597 jmp(initialize_header);
3598 } else {
3599 // initialize both the header and fields
3600 jmp(initialize_object);
3601 }
3602 } else {
3603 jmp(slow_case);
3604 }
3605
  // If UseTLAB is true, the object was allocated above and still needs to be initialized.
  // Otherwise, this block is skipped and control goes to the slow path.
3608 if (UseTLAB) {
3609 if (clear_fields) {
      // Object fields are cleared before the header is written. If the size of
      // the field area is zero, go directly to the header initialization.
3612 bind(initialize_object);
3613 if (UseCompactObjectHeaders) {
3614 assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
3615 decrement(layout_size, oopDesc::base_offset_in_bytes());
3616 } else {
3617 decrement(layout_size, sizeof(oopDesc));
3618 }
3619 jcc(Assembler::zero, initialize_header);
3620
3621 // Initialize topmost object field, divide size by 8, check if odd and
3622 // test if zero.
3623 Register zero = klass;
3624 xorl(zero, zero); // use zero reg to clear memory (shorter code)
      shrl(layout_size, LogBytesPerLong); // divide by BytesPerLong and set carry flag if odd
3626
3627 #ifdef ASSERT
3628 // make sure instance_size was multiple of 8
3629 Label L;
3630 // Ignore partial flag stall after shrl() since it is debug VM
3631 jcc(Assembler::carryClear, L);
      stop("object size is not a multiple of 8 - adjust this code");
3633 bind(L);
3634 // must be > 0, no extra check needed here
3635 #endif
3636
3637 // initialize remaining object fields: instance_size was a multiple of 8
3638 {
3639 Label loop;
3640 bind(loop);
3641 int header_size_bytes = oopDesc::header_size() * HeapWordSize;
3642 assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
3643 movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
3644 decrement(layout_size);
3645 jcc(Assembler::notZero, loop);
3646 }
3647 } // clear_fields
3648
3649 // initialize object header only.
3650 bind(initialize_header);
3651 if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
3652 pop(klass);
3653 Register mark_word = t2;
3654 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
3655 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
3656 } else {
3657 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
3658 (intptr_t)markWord::prototype().value()); // header
3659 pop(klass); // get saved klass back in the register.
3660 }
3661 if (!UseCompactObjectHeaders) {
3662 xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
3663 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops
3664 movptr(t2, klass); // preserve klass
3665 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed
3666 }
3667 jmp(done);
3668 }
3669
3670 bind(slow_case);
3671 pop(klass);
3672 bind(slow_case_no_pop);
3673 jmp(alloc_failed);
3674
3675 bind(done);
3676 }
3677
3678 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
3679 void MacroAssembler::tlab_allocate(Register obj,
3680 Register var_size_in_bytes,
3681 int con_size_in_bytes,
3682 Register t1,
3683 Register t2,
3684 Label& slow_case) {
3685 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3686 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
3687 }
3688
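// General-purpose registers that are call-clobbered under the native ABI.
// rsi and rdi are callee-saved on Windows and therefore excluded there; the
// APX extended GPRs (r16 and up) are included when UseAPX is enabled.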
3689 RegSet MacroAssembler::call_clobbered_gp_registers() {
3690 RegSet regs;
3691 regs += RegSet::of(rax, rcx, rdx);
3692 #ifndef _WINDOWS
3693 regs += RegSet::of(rsi, rdi);
3694 #endif
3695 regs += RegSet::range(r8, r11);
3696 if (UseAPX) {
3697 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
3698 }
3699 return regs;
3700 }
3701
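// Call-clobbered XMM registers: all of them on System V targets; on Windows
// only xmm0-xmm5 plus, when present, xmm16 and up, since xmm6-xmm15 are
// callee-saved there.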
3702 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
3703 int num_xmm_registers = XMMRegister::available_xmm_registers();
3704 #if defined(_WINDOWS)
3705 XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
3706 if (num_xmm_registers > 16) {
3707 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
3708 }
3709 return result;
3710 #else
3711 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
3712 #endif
3713 }
3714
3715 // C1 only ever uses the first double/float of the XMM register.
3716 static int xmm_save_size() { return sizeof(double); }
3717
3718 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3719 masm->movdbl(Address(rsp, offset), reg);
3720 }
3721
3722 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
3723 masm->movdbl(reg, Address(rsp, offset));
3724 }
3725
3726 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
3727 bool save_fpu, int& gp_area_size, int& xmm_area_size) {
3728
3729 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
3730 StackAlignmentInBytes);
3731 xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
3732
3733 return gp_area_size + xmm_area_size;
3734 }
3735
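// Spill all call-clobbered registers (minus 'exclude') below rsp. As a sketch,
// the save area is laid out as follows (offsets relative to the new rsp):
//   [0, gp_area_size)                            GP registers
//   [gp_area_size, gp_area_size + xmm_area_size) XMM registers (low double only), when save_fpu
// pop_call_clobbered_registers_except() must be called with matching arguments to unwind it.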
3736 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
3737 block_comment("push_call_clobbered_registers start");
3738 // Regular registers
3739 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
3740
3741 int gp_area_size;
3742 int xmm_area_size;
3743 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
3744 gp_area_size, xmm_area_size);
3745 subptr(rsp, total_save_size);
3746
3747 push_set(gp_registers_to_push, 0);
3748
3749 if (save_fpu) {
3750 push_set(call_clobbered_xmm_registers(), gp_area_size);
3751 }
3752
3753 block_comment("push_call_clobbered_registers end");
3754 }
3755
3756 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
3757 block_comment("pop_call_clobbered_registers start");
3758
3759 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
3760
3761 int gp_area_size;
3762 int xmm_area_size;
3763 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
3764 gp_area_size, xmm_area_size);
3765
3766 if (restore_fpu) {
3767 pop_set(call_clobbered_xmm_registers(), gp_area_size);
3768 }
3769
3770 pop_set(gp_registers_to_pop, 0);
3771
3772 addptr(rsp, total_save_size);
3773
3774 vzeroupper();
3775
3776 block_comment("pop_call_clobbered_registers end");
3777 }
3778
3779 void MacroAssembler::push_set(XMMRegSet set, int offset) {
3780 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
3781 int spill_offset = offset;
3782
3783 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
3784 save_xmm_register(this, spill_offset, *it);
3785 spill_offset += xmm_save_size();
3786 }
3787 }
3788
3789 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
3790 int restore_size = set.size() * xmm_save_size();
3791 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
3792
3793 int restore_offset = offset + restore_size - xmm_save_size();
3794
3795 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
3796 restore_xmm_register(this, restore_offset, *it);
3797 restore_offset -= xmm_save_size();
3798 }
3799 }
3800
3801 void MacroAssembler::push_set(RegSet set, int offset) {
3802 int spill_offset;
3803 if (offset == -1) {
3804 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3805 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
3806 subptr(rsp, aligned_size);
3807 spill_offset = 0;
3808 } else {
3809 spill_offset = offset;
3810 }
3811
3812 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
3813 movptr(Address(rsp, spill_offset), *it);
3814 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3815 }
3816 }
3817
3818 void MacroAssembler::pop_set(RegSet set, int offset) {
3819
3820 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
3821 int restore_size = set.size() * gp_reg_size;
3822 int aligned_size = align_up(restore_size, StackAlignmentInBytes);
3823
3824 int restore_offset;
3825 if (offset == -1) {
3826 restore_offset = restore_size - gp_reg_size;
3827 } else {
3828 restore_offset = offset + restore_size - gp_reg_size;
3829 }
3830 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
3831 movptr(*it, Address(rsp, restore_offset));
3832 restore_offset -= gp_reg_size;
3833 }
3834
3835 if (offset == -1) {
3836 addptr(rsp, aligned_size);
3837 }
3838 }
3839
// Preserves the contents of address; destroys the contents of length_in_bytes and temp.
3841 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
3842 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
3843 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
3844 Label done;
3845
3846 testptr(length_in_bytes, length_in_bytes);
3847 jcc(Assembler::zero, done);
3848
  // convert length_in_bytes into a count of words and zero the memory word by word
  // note: for the remaining code to work, length_in_bytes must be a multiple of BytesPerWord
3851 #ifdef ASSERT
3852 {
3853 Label L;
3854 testptr(length_in_bytes, BytesPerWord - 1);
3855 jcc(Assembler::zero, L);
3856 stop("length must be a multiple of BytesPerWord");
3857 bind(L);
3858 }
3859 #endif
3860 Register index = length_in_bytes;
3861 xorptr(temp, temp); // use _zero reg to clear memory (shorter code)
3862 if (UseIncDec) {
    shrptr(index, 3); // divide by BytesPerWord (8) and set carry flag if bit 2 was set
3864 } else {
3865 shrptr(index, 2); // use 2 instructions to avoid partial flag stall
3866 shrptr(index, 1);
3867 }
3868
3869 // initialize remaining object fields: index is a multiple of 2 now
3870 {
3871 Label loop;
3872 bind(loop);
3873 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
3874 decrement(index);
3875 jcc(Assembler::notZero, loop);
3876 }
3877
3878 bind(done);
3879 }
3880
3881 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
3882 inline_layout_info(holder_klass, index, inline_klass);
3883 movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
3884 }
3885
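// Compute the address of the InlineLayoutInfo entry for the field at 'index'
// in holder_klass' inline_layout_info array and leave it in layout_info.
// Clobbers index (it is scaled to a byte offset).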
3886 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
3887 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
3888 #ifdef ASSERT
3889 {
3890 Label done;
3891 cmpptr(layout_info, 0);
3892 jcc(Assembler::notEqual, done);
3893 stop("inline_layout_info_array is null");
3894 bind(done);
3895 }
3896 #endif
3897
3898 InlineLayoutInfo array[2];
3899 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
3900 if (is_power_of_2(size)) {
3901 shll(index, log2i_exact(size)); // Scale index by power of 2
3902 } else {
3903 imull(index, index, size); // Scale the index to be the entry index * array_element_size
3904 }
3905 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
3906 }
3907
3908 // Look up the method for a megamorphic invokeinterface call.
3909 // The target method is determined by <intf_klass, itable_index>.
3910 // The receiver klass is in recv_klass.
3911 // On success, the result will be in method_result, and execution falls through.
3912 // On failure, execution transfers to the given label.
3913 void MacroAssembler::lookup_interface_method(Register recv_klass,
3914 Register intf_klass,
3915 RegisterOrConstant itable_index,
3916 Register method_result,
3917 Register scan_temp,
3918 Label& L_no_such_interface,
3919 bool return_method) {
3920 assert_different_registers(recv_klass, intf_klass, scan_temp);
3921 assert_different_registers(method_result, intf_klass, scan_temp);
3922 assert(recv_klass != method_result || !return_method,
3923 "recv_klass can be destroyed when method isn't needed");
3924
3925 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
3926 "caller must use same register for non-constant itable index as for method");
3927
3928 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
3929 int vtable_base = in_bytes(Klass::vtable_start_offset());
3930 int itentry_off = in_bytes(itableMethodEntry::method_offset());
3931 int scan_step = itableOffsetEntry::size() * wordSize;
3932 int vte_size = vtableEntry::size_in_bytes();
3933 Address::ScaleFactor times_vte_scale = Address::times_ptr;
3934 assert(vte_size == wordSize, "else adjust times_vte_scale");
3935
3936 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
3937
3938 // Could store the aligned, prescaled offset in the klass.
3939 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
3940
3941 if (return_method) {
3942 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
3943 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
3944 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
3945 }
3946
3947 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
3948 // if (scan->interface() == intf) {
3949 // result = (klass + scan->offset() + itable_index);
3950 // }
3951 // }
3952 Label search, found_method;
3953
3954 for (int peel = 1; peel >= 0; peel--) {
3955 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
3956 cmpptr(intf_klass, method_result);
3957
3958 if (peel) {
3959 jccb(Assembler::equal, found_method);
3960 } else {
3961 jccb(Assembler::notEqual, search);
3962 // (invert the test to fall through to found_method...)
3963 }
3964
3965 if (!peel) break;
3966
3967 bind(search);
3968
3969 // Check that the previous entry is non-null. A null entry means that
3970 // the receiver class doesn't implement the interface, and wasn't the
3971 // same as when the caller was compiled.
3972 testptr(method_result, method_result);
3973 jcc(Assembler::zero, L_no_such_interface);
3974 addptr(scan_temp, scan_step);
3975 }
3976
3977 bind(found_method);
3978
3979 if (return_method) {
3980 // Got a hit.
3981 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
3982 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
3983 }
3984 }
3985
3986 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
3987 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
3988 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
3989 // The target method is determined by <holder_klass, itable_index>.
3990 // The receiver klass is in recv_klass.
3991 // On success, the result will be in method_result, and execution falls through.
3992 // On failure, execution transfers to the given label.
3993 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
3994 Register holder_klass,
3995 Register resolved_klass,
3996 Register method_result,
3997 Register scan_temp,
3998 Register temp_reg2,
3999 Register receiver,
4000 int itable_index,
4001 Label& L_no_such_interface) {
4002 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
4003 Register temp_itbl_klass = method_result;
  Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register when no second temp is supplied
4005
4006 int vtable_base = in_bytes(Klass::vtable_start_offset());
4007 int itentry_off = in_bytes(itableMethodEntry::method_offset());
4008 int scan_step = itableOffsetEntry::size() * wordSize;
4009 int vte_size = vtableEntry::size_in_bytes();
4010 int ioffset = in_bytes(itableOffsetEntry::interface_offset());
4011 int ooffset = in_bytes(itableOffsetEntry::offset_offset());
4012 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4013 assert(vte_size == wordSize, "adjust times_vte_scale");
4014
4015 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
4016
4017 // temp_itbl_klass = recv_klass.itable[0]
4018 // scan_temp = &recv_klass.itable[0] + step
4019 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
4020 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
4021 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
4022 xorptr(temp_reg, temp_reg);
4023
4024 // Initial checks:
4025 // - if (holder_klass != resolved_klass), go to "scan for resolved"
4026 // - if (itable[0] == 0), no such interface
4027 // - if (itable[0] == holder_klass), shortcut to "holder found"
4028 cmpptr(holder_klass, resolved_klass);
4029 jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
4030 testptr(temp_itbl_klass, temp_itbl_klass);
4031 jccb(Assembler::zero, L_no_such_interface);
4032 cmpptr(holder_klass, temp_itbl_klass);
4033 jccb(Assembler::equal, L_holder_found);
4034
4035 // Loop: Look for holder_klass record in itable
4036 // do {
4037 // tmp = itable[index];
4038 // index += step;
4039 // if (tmp == holder_klass) {
4040 // goto L_holder_found; // Found!
4041 // }
4042 // } while (tmp != 0);
4043 // goto L_no_such_interface // Not found.
4044 Label L_scan_holder;
4045 bind(L_scan_holder);
4046 movptr(temp_itbl_klass, Address(scan_temp, 0));
4047 addptr(scan_temp, scan_step);
4048 cmpptr(holder_klass, temp_itbl_klass);
4049 jccb(Assembler::equal, L_holder_found);
4050 testptr(temp_itbl_klass, temp_itbl_klass);
4051 jccb(Assembler::notZero, L_scan_holder);
4052
4053 jmpb(L_no_such_interface);
4054
4055 // Loop: Look for resolved_class record in itable
4056 // do {
4057 // tmp = itable[index];
4058 // index += step;
4059 // if (tmp == holder_klass) {
4060 // // Also check if we have met a holder klass
4061 // holder_tmp = itable[index-step-ioffset];
4062 // }
4063 // if (tmp == resolved_klass) {
4064 // goto L_resolved_found; // Found!
4065 // }
4066 // } while (tmp != 0);
4067 // goto L_no_such_interface // Not found.
4068 //
4069 Label L_loop_scan_resolved;
4070 bind(L_loop_scan_resolved);
4071 movptr(temp_itbl_klass, Address(scan_temp, 0));
4072 addptr(scan_temp, scan_step);
4073 bind(L_loop_scan_resolved_entry);
4074 cmpptr(holder_klass, temp_itbl_klass);
4075 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4076 cmpptr(resolved_klass, temp_itbl_klass);
4077 jccb(Assembler::equal, L_resolved_found);
4078 testptr(temp_itbl_klass, temp_itbl_klass);
4079 jccb(Assembler::notZero, L_loop_scan_resolved);
4080
4081 jmpb(L_no_such_interface);
4082
4083 Label L_ready;
4084
4085 // See if we already have a holder klass. If not, go and scan for it.
4086 bind(L_resolved_found);
4087 testptr(temp_reg, temp_reg);
4088 jccb(Assembler::zero, L_scan_holder);
4089 jmpb(L_ready);
4090
4091 bind(L_holder_found);
4092 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
4093
4094 // Finally, temp_reg contains holder_klass vtable offset
4095 bind(L_ready);
4096 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  if (temp_reg2 == noreg) { // recv_klass was used as a temp above and is clobbered; reload the klass from the receiver
4098 load_klass(scan_temp, receiver, noreg);
4099 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4100 } else {
4101 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
4102 }
4103 }
4104
4105
4106 // virtual method calling
4107 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4108 RegisterOrConstant vtable_index,
4109 Register method_result) {
4110 const ByteSize base = Klass::vtable_start_offset();
4111 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4112 Address vtable_entry_addr(recv_klass,
4113 vtable_index, Address::times_ptr,
4114 base + vtableEntry::method_offset());
4115 movptr(method_result, vtable_entry_addr);
4116 }
4117
4118
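// Convenience wrapper: test whether sub_klass is a subtype of super_klass by
// running the fast path and, if that is inconclusive, the slow path. Jumps to
// L_success when the check succeeds; otherwise execution falls through.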
4119 void MacroAssembler::check_klass_subtype(Register sub_klass,
4120 Register super_klass,
4121 Register temp_reg,
4122 Label& L_success) {
4123 Label L_failure;
4124 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
4125 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
4126 bind(L_failure);
4127 }
4128
4129
4130 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4131 Register super_klass,
4132 Register temp_reg,
4133 Label* L_success,
4134 Label* L_failure,
4135 Label* L_slow_path,
4136 RegisterOrConstant super_check_offset) {
4137 assert_different_registers(sub_klass, super_klass, temp_reg);
4138 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4139 if (super_check_offset.is_register()) {
4140 assert_different_registers(sub_klass, super_klass,
4141 super_check_offset.as_register());
4142 } else if (must_load_sco) {
4143 assert(temp_reg != noreg, "supply either a temp or a register offset");
4144 }
4145
4146 Label L_fallthrough;
4147 int label_nulls = 0;
4148 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4149 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4150 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
4151 assert(label_nulls <= 1, "at most one null in the batch");
4152
4153 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4154 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4155 Address super_check_offset_addr(super_klass, sco_offset);
4156
4157 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4158 // range of a jccb. If this routine grows larger, reconsider at
4159 // least some of these.
4160 #define local_jcc(assembler_cond, label) \
4161 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4162 else jcc( assembler_cond, label) /*omit semi*/
4163
4164 // Hacked jmp, which may only be used just before L_fallthrough.
4165 #define final_jmp(label) \
4166 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4167 else jmp(label) /*omit semi*/
4168
4169 // If the pointers are equal, we are done (e.g., String[] elements).
4170 // This self-check enables sharing of secondary supertype arrays among
4171 // non-primary types such as array-of-interface. Otherwise, each such
4172 // type would need its own customized SSA.
4173 // We move this check to the front of the fast path because many
4174 // type checks are in fact trivially successful in this manner,
4175 // so we get a nicely predicted branch right at the start of the check.
4176 cmpptr(sub_klass, super_klass);
4177 local_jcc(Assembler::equal, *L_success);
4178
4179 // Check the supertype display:
4180 if (must_load_sco) {
4181 // Positive movl does right thing on LP64.
4182 movl(temp_reg, super_check_offset_addr);
4183 super_check_offset = RegisterOrConstant(temp_reg);
4184 }
4185 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4186 cmpptr(super_klass, super_check_addr); // load displayed supertype
4187
4188 // This check has worked decisively for primary supers.
4189 // Secondary supers are sought in the super_cache ('super_cache_addr').
4190 // (Secondary supers are interfaces and very deeply nested subtypes.)
4191 // This works in the same check above because of a tricky aliasing
4192 // between the super_cache and the primary super display elements.
4193 // (The 'super_check_addr' can address either, as the case requires.)
4194 // Note that the cache is updated below if it does not help us find
4195 // what we need immediately.
4196 // So if it was a primary super, we can just fail immediately.
4197 // Otherwise, it's the slow path for us (no success at this point).
4198
4199 if (super_check_offset.is_register()) {
4200 local_jcc(Assembler::equal, *L_success);
4201 cmpl(super_check_offset.as_register(), sc_offset);
4202 if (L_failure == &L_fallthrough) {
4203 local_jcc(Assembler::equal, *L_slow_path);
4204 } else {
4205 local_jcc(Assembler::notEqual, *L_failure);
4206 final_jmp(*L_slow_path);
4207 }
4208 } else if (super_check_offset.as_constant() == sc_offset) {
4209 // Need a slow path; fast failure is impossible.
4210 if (L_slow_path == &L_fallthrough) {
4211 local_jcc(Assembler::equal, *L_success);
4212 } else {
4213 local_jcc(Assembler::notEqual, *L_slow_path);
4214 final_jmp(*L_success);
4215 }
4216 } else {
4217 // No slow path; it's a fast decision.
4218 if (L_failure == &L_fallthrough) {
4219 local_jcc(Assembler::equal, *L_success);
4220 } else {
4221 local_jcc(Assembler::notEqual, *L_failure);
4222 final_jmp(*L_success);
4223 }
4224 }
4225
4226 bind(L_fallthrough);
4227
4228 #undef local_jcc
4229 #undef final_jmp
4230 }
4231
4232
4233 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
4234 Register super_klass,
4235 Register temp_reg,
4236 Register temp2_reg,
4237 Label* L_success,
4238 Label* L_failure,
4239 bool set_cond_codes) {
4240 assert_different_registers(sub_klass, super_klass, temp_reg);
4241 if (temp2_reg != noreg)
4242 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
4243 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
4244
4245 Label L_fallthrough;
4246 int label_nulls = 0;
4247 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4248 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4249 assert(label_nulls <= 1, "at most one null in the batch");
4250
4251 // a couple of useful fields in sub_klass:
4252 int ss_offset = in_bytes(Klass::secondary_supers_offset());
4253 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4254 Address secondary_supers_addr(sub_klass, ss_offset);
4255 Address super_cache_addr( sub_klass, sc_offset);
4256
4257 // Do a linear scan of the secondary super-klass chain.
4258 // This code is rarely used, so simplicity is a virtue here.
4259 // The repne_scan instruction uses fixed registers, which we must spill.
4260 // Don't worry too much about pre-existing connections with the input regs.
4261
4262 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
4263 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
4264
4265 // Get super_klass value into rax (even if it was in rdi or rcx).
4266 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
4267 if (super_klass != rax) {
4268 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
4269 mov(rax, super_klass);
4270 }
4271 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
4272 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
4273
4274 #ifndef PRODUCT
4275 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
4276 ExternalAddress pst_counter_addr((address) pst_counter);
4277 lea(rcx, pst_counter_addr);
4278 incrementl(Address(rcx, 0));
4279 #endif //PRODUCT
4280
4281 // We will consult the secondary-super array.
4282 movptr(rdi, secondary_supers_addr);
4283 // Load the array length. (Positive movl does right thing on LP64.)
4284 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
4285 // Skip to start of data.
4286 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
4287
4288 // Scan RCX words at [RDI] for an occurrence of RAX.
4289 // Set NZ/Z based on last compare.
  // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself does
  // not change flags (only the repeated scas instruction sets them).
  // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
4293
4294 testptr(rax,rax); // Set Z = 0
4295 repne_scan();
4296
4297 // Unspill the temp. registers:
4298 if (pushed_rdi) pop(rdi);
4299 if (pushed_rcx) pop(rcx);
4300 if (pushed_rax) pop(rax);
4301
4302 if (set_cond_codes) {
4303 // Special hack for the AD files: rdi is guaranteed non-zero.
4304 assert(!pushed_rdi, "rdi must be left non-null");
4305 // Also, the condition codes are properly set Z/NZ on succeed/failure.
4306 }
4307
4308 if (L_failure == &L_fallthrough)
4309 jccb(Assembler::notEqual, *L_failure);
4310 else jcc(Assembler::notEqual, *L_failure);
4311
4312 // Success. Cache the super we found and proceed in triumph.
4313 movptr(super_cache_addr, super_klass);
4314
4315 if (L_success != &L_fallthrough) {
4316 jmp(*L_success);
4317 }
4318
4319 #undef IS_A_TEMP
4320
4321 bind(L_fallthrough);
4322 }
4323
4324 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4325 Register super_klass,
4326 Register temp_reg,
4327 Register temp2_reg,
4328 Label* L_success,
4329 Label* L_failure,
4330 bool set_cond_codes) {
4331 assert(set_cond_codes == false, "must be false on 64-bit x86");
4332 check_klass_subtype_slow_path
4333 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
4334 L_success, L_failure);
4335 }
4336
4337 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4338 Register super_klass,
4339 Register temp_reg,
4340 Register temp2_reg,
4341 Register temp3_reg,
4342 Register temp4_reg,
4343 Label* L_success,
4344 Label* L_failure) {
4345 if (UseSecondarySupersTable) {
4346 check_klass_subtype_slow_path_table
4347 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
4348 L_success, L_failure);
4349 } else {
4350 check_klass_subtype_slow_path_linear
4351 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
4352 }
4353 }
4354
4355 Register MacroAssembler::allocate_if_noreg(Register r,
4356 RegSetIterator<Register> &available_regs,
4357 RegSet ®s_to_push) {
4358 if (!r->is_valid()) {
4359 r = *available_regs++;
4360 regs_to_push += r;
4361 }
4362 return r;
4363 }
4364
4365 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
4366 Register super_klass,
4367 Register temp_reg,
4368 Register temp2_reg,
4369 Register temp3_reg,
4370 Register result_reg,
4371 Label* L_success,
4372 Label* L_failure) {
4373 // NB! Callers may assume that, when temp2_reg is a valid register,
4374 // this code sets it to a nonzero value.
4375 bool temp2_reg_was_valid = temp2_reg->is_valid();
4376
4377 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
4378
4379 Label L_fallthrough;
4380 int label_nulls = 0;
4381 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4382 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4383 assert(label_nulls <= 1, "at most one null in the batch");
4384
4385 BLOCK_COMMENT("check_klass_subtype_slow_path_table");
4386
4387 RegSetIterator<Register> available_regs
4388 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
4389
4390 RegSet pushed_regs;
4391
4392 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
4393 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
4394 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
4395 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
4396 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
4397
4398 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
4399
4400 {
4401
4402 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
4403 int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
4404 subptr(rsp, aligned_size);
4405 push_set(pushed_regs, 0);
4406
4407 lookup_secondary_supers_table_var(sub_klass,
4408 super_klass,
4409 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
4410 cmpq(result_reg, 0);
4411
4412 // Unspill the temp. registers:
4413 pop_set(pushed_regs, 0);
4414 // Increment SP but do not clobber flags.
4415 lea(rsp, Address(rsp, aligned_size));
4416 }
4417
4418 if (temp2_reg_was_valid) {
4419 movq(temp2_reg, 1);
4420 }
4421
4422 jcc(Assembler::notEqual, *L_failure);
4423
4424 if (L_success != &L_fallthrough) {
4425 jmp(*L_success);
4426 }
4427
4428 bind(L_fallthrough);
4429 }
4430
4431 // population_count variant for running without the POPCNT
4432 // instruction, which was introduced with SSE4.2 in 2008.
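// The fallback loop below uses the classic clear-lowest-set-bit idiom
// (x &= x - 1), so it executes one iteration per set bit in src.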
4433 void MacroAssembler::population_count(Register dst, Register src,
4434 Register scratch1, Register scratch2) {
4435 assert_different_registers(src, scratch1, scratch2);
4436 if (UsePopCountInstruction) {
4437 Assembler::popcntq(dst, src);
4438 } else {
4439 assert_different_registers(src, scratch1, scratch2);
4440 assert_different_registers(dst, scratch1, scratch2);
4441 Label loop, done;
4442
4443 mov(scratch1, src);
4444 // dst = 0;
4445 // while(scratch1 != 0) {
4446 // dst++;
4447 // scratch1 &= (scratch1 - 1);
4448 // }
4449 xorl(dst, dst);
4450 testq(scratch1, scratch1);
4451 jccb(Assembler::equal, done);
4452 {
4453 bind(loop);
4454 incq(dst);
4455 movq(scratch2, scratch1);
4456 decq(scratch2);
4457 andq(scratch1, scratch2);
4458 jccb(Assembler::notEqual, loop);
4459 }
4460 bind(done);
4461 }
4462 #ifdef ASSERT
4463 mov64(scratch1, 0xCafeBabeDeadBeef);
4464 movq(scratch2, scratch1);
4465 #endif
4466 }
4467
4468 // Ensure that the inline code and the stub are using the same registers.
4469 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
4470 do { \
4471 assert(r_super_klass == rax, "mismatch"); \
4472 assert(r_array_base == rbx, "mismatch"); \
4473 assert(r_array_length == rcx, "mismatch"); \
4474 assert(r_array_index == rdx, "mismatch"); \
4475 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \
4476 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \
4477 assert(result == rdi || result == noreg, "mismatch"); \
4478 } while(0)
4479
4480 // Versions of salq and rorq that don't need count to be in rcx
4481
4482 void MacroAssembler::salq(Register dest, Register count) {
4483 if (count == rcx) {
4484 Assembler::salq(dest);
4485 } else {
4486 assert_different_registers(rcx, dest);
4487 xchgq(rcx, count);
4488 Assembler::salq(dest);
4489 xchgq(rcx, count);
4490 }
4491 }
4492
4493 void MacroAssembler::rorq(Register dest, Register count) {
4494 if (count == rcx) {
4495 Assembler::rorq(dest);
4496 } else {
4497 assert_different_registers(rcx, dest);
4498 xchgq(rcx, count);
4499 Assembler::rorq(dest);
4500 xchgq(rcx, count);
4501 }
4502 }
4503
4506 // At runtime, return 0 in result if r_super_klass is a superclass of
4507 // r_sub_klass, otherwise return nonzero. Use this if you know the
4508 // super_klass_slot of the class you're looking for. This is always
4509 // the case for instanceof and checkcast.
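//
// In outline, the table works as follows: each secondary super hashes to a
// slot in a 64-entry table; a 64-bit bitmap in the Klass records which slots
// are occupied, and the population count of the bitmap bits below a slot
// gives the index of the corresponding entry in the packed secondary_supers
// array. Collisions are resolved by linear probing in the slow path.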
4510 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
4511 Register r_super_klass,
4512 Register temp1,
4513 Register temp2,
4514 Register temp3,
4515 Register temp4,
4516 Register result,
4517 u1 super_klass_slot) {
4518 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4519
4520 Label L_fallthrough, L_success, L_failure;
4521
4522 BLOCK_COMMENT("lookup_secondary_supers_table {");
4523
4524 const Register
4525 r_array_index = temp1,
4526 r_array_length = temp2,
4527 r_array_base = temp3,
4528 r_bitmap = temp4;
4529
4530 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
4531
4532 xorq(result, result); // = 0
4533
4534 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4535 movq(r_array_index, r_bitmap);
4536
4537 // First check the bitmap to see if super_klass might be present. If
4538 // the bit is zero, we are certain that super_klass is not one of
4539 // the secondary supers.
4540 u1 bit = super_klass_slot;
4541 {
    // NB: If the count in an x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
4544 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
4545 if (shift_count != 0) {
4546 salq(r_array_index, shift_count);
4547 } else {
4548 testq(r_array_index, r_array_index);
4549 }
4550 }
4551 // We test the MSB of r_array_index, i.e. its sign bit
4552 jcc(Assembler::positive, L_failure);
4553
4554 // Get the first array index that can contain super_klass into r_array_index.
4555 if (bit != 0) {
4556 population_count(r_array_index, r_array_index, temp2, temp3);
4557 } else {
4558 movl(r_array_index, 1);
4559 }
4560 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4561
4562 // We will consult the secondary-super array.
4563 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4564
4565 // We're asserting that the first word in an Array<Klass*> is the
4566 // length, and the second word is the first word of the data. If
4567 // that ever changes, r_array_base will have to be adjusted here.
4568 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4569 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4570
4571 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4572 jccb(Assembler::equal, L_success);
4573
4574 // Is there another entry to check? Consult the bitmap.
4575 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
4576 jccb(Assembler::carryClear, L_failure);
4577
4578 // Linear probe. Rotate the bitmap so that the next bit to test is
4579 // in Bit 1.
4580 if (bit != 0) {
4581 rorq(r_bitmap, bit);
4582 }
4583
4584 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4585 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4586 // Kills: r_array_length.
4587 // Returns: result.
4588 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
4589 // Result (0/1) is in rdi
4590 jmpb(L_fallthrough);
4591
4592 bind(L_failure);
4593 incq(result); // 0 => 1
4594
4595 bind(L_success);
4596 // result = 0;
4597
4598 bind(L_fallthrough);
4599 BLOCK_COMMENT("} lookup_secondary_supers_table");
4600
4601 if (VerifySecondarySupers) {
4602 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4603 temp1, temp2, temp3);
4604 }
4605 }
4606
4607 // At runtime, return 0 in result if r_super_klass is a superclass of
4608 // r_sub_klass, otherwise return nonzero. Use this version of
4609 // lookup_secondary_supers_table() if you don't know ahead of time
4610 // which superclass will be searched for. Used by interpreter and
4611 // runtime stubs. It is larger and has somewhat greater latency than
4612 // the version above, which takes a constant super_klass_slot.
4613 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
4614 Register r_super_klass,
4615 Register temp1,
4616 Register temp2,
4617 Register temp3,
4618 Register temp4,
4619 Register result) {
4620 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
4621 assert_different_registers(r_sub_klass, r_super_klass, rcx);
4622 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
4623
4624 Label L_fallthrough, L_success, L_failure;
4625
4626 BLOCK_COMMENT("lookup_secondary_supers_table {");
4627
4628 RegSetIterator<Register> available_regs = (temps - rcx).begin();
4629
4630 // FIXME. Once we are sure that all paths reaching this point really
4631 // do pass rcx as one of our temps we can get rid of the following
4632 // workaround.
4633 assert(temps.contains(rcx), "fix this code");
4634
4635 // We prefer to have our shift count in rcx. If rcx is one of our
4636 // temps, use it for slot. If not, pick any of our temps.
4637 Register slot;
4638 if (!temps.contains(rcx)) {
4639 slot = *available_regs++;
4640 } else {
4641 slot = rcx;
4642 }
4643
4644 const Register r_array_index = *available_regs++;
4645 const Register r_bitmap = *available_regs++;
4646
4647 // The logic above guarantees this property, but we state it here.
4648 assert_different_registers(r_array_index, r_bitmap, rcx);
4649
4650 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
4651 movq(r_array_index, r_bitmap);
4652
4653 // First check the bitmap to see if super_klass might be present. If
4654 // the bit is zero, we are certain that super_klass is not one of
4655 // the secondary supers.
4656 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4657 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
4658 salq(r_array_index, slot);
4659
4660 testq(r_array_index, r_array_index);
4661 // We test the MSB of r_array_index, i.e. its sign bit
4662 jcc(Assembler::positive, L_failure);
4663
4664 const Register r_array_base = *available_regs++;
4665
4666 // Get the first array index that can contain super_klass into r_array_index.
4667 // Note: Clobbers r_array_base and slot.
4668 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
4669
4670 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
4671
4672 // We will consult the secondary-super array.
4673 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4674
4675 // We're asserting that the first word in an Array<Klass*> is the
4676 // length, and the second word is the first word of the data. If
4677 // that ever changes, r_array_base will have to be adjusted here.
4678 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
4679 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
4680
4681 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4682 jccb(Assembler::equal, L_success);
4683
4684 // Restore slot to its true value
4685 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
4686
4687 // Linear probe. Rotate the bitmap so that the next bit to test is
4688 // in Bit 1.
4689 rorq(r_bitmap, slot);
4690
4691 // Is there another entry to check? Consult the bitmap.
4692 btq(r_bitmap, 1);
4693 jccb(Assembler::carryClear, L_failure);
4694
4695 // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
4696 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
4697 // Kills: r_array_length.
4698 // Returns: result.
4699 lookup_secondary_supers_table_slow_path(r_super_klass,
4700 r_array_base,
4701 r_array_index,
4702 r_bitmap,
4703 /*temp1*/result,
4704 /*temp2*/slot,
4705 &L_success,
4706 nullptr);
4707
4708 bind(L_failure);
4709 movq(result, 1);
4710 jmpb(L_fallthrough);
4711
4712 bind(L_success);
4713 xorq(result, result); // = 0
4714
4715 bind(L_fallthrough);
4716 BLOCK_COMMENT("} lookup_secondary_supers_table");
4717
4718 if (VerifySecondarySupers) {
4719 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
4720 temp1, temp2, temp3);
4721 }
4722 }
4723
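// Despite its name this is a plain compare loop, not a repne/scasq sequence:
// it scans the quadwords addr[count] .. addr[limit - 1] for 'value', jumping
// to L_success on a match and to L_failure (or falling through) otherwise.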
4724 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
4725 Label* L_success, Label* L_failure) {
4726 Label L_loop, L_fallthrough;
4727 {
4728 int label_nulls = 0;
4729 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4730 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4731 assert(label_nulls <= 1, "at most one null in the batch");
4732 }
4733 bind(L_loop);
4734 cmpq(value, Address(addr, count, Address::times_8));
4735 jcc(Assembler::equal, *L_success);
4736 addl(count, 1);
4737 cmpl(count, limit);
4738 jcc(Assembler::less, L_loop);
4739
4740 if (&L_fallthrough != L_failure) {
4741 jmp(*L_failure);
4742 }
4743 bind(L_fallthrough);
4744 }
4745
4746 // Called by code generated by check_klass_subtype_slow_path
4747 // above. This is called when there is a collision in the hashed
4748 // lookup in the secondary supers array.
4749 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
4750 Register r_array_base,
4751 Register r_array_index,
4752 Register r_bitmap,
4753 Register temp1,
4754 Register temp2,
4755 Label* L_success,
4756 Label* L_failure) {
4757 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
4758
4759 const Register
4760 r_array_length = temp1,
4761 r_sub_klass = noreg,
4762 result = noreg;
4763
4764 Label L_fallthrough;
4765 int label_nulls = 0;
4766 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
4767 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
4768 assert(label_nulls <= 1, "at most one null in the batch");
4769
4770 // Load the array length.
4771 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4772 // And adjust the array base to point to the data.
4773 // NB! Effectively increments current slot index by 1.
4774 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
4775 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4776
4777 // Linear probe
4778 Label L_huge;
4779
4780 // The bitmap is full to bursting.
4781 // Implicit invariant: BITMAP_FULL implies (length > 0)
4782 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
4783 jcc(Assembler::greater, L_huge);
4784
4785 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
4786 // current slot (at secondary_supers[r_array_index]) has not yet
4787 // been inspected, and r_array_index may be out of bounds if we
4788 // wrapped around the end of the array.
4789
4790 { // This is conventional linear probing, but instead of terminating
4791 // when a null entry is found in the table, we maintain a bitmap
4792 // in which a 0 indicates missing entries.
4793 // The check above guarantees there are 0s in the bitmap, so the loop
4794 // eventually terminates.
4795
4796 xorl(temp2, temp2); // = 0;
4797
4798 Label L_again;
4799 bind(L_again);
4800
4801 // Check for array wraparound.
4802 cmpl(r_array_index, r_array_length);
4803 cmovl(Assembler::greaterEqual, r_array_index, temp2);
4804
4805 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
4806 jcc(Assembler::equal, *L_success);
4807
4808 // If the next bit in bitmap is zero, we're done.
4809 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
4810 jcc(Assembler::carryClear, *L_failure);
4811
4812 rorq(r_bitmap, 1); // Bits 1/2 => 0/1
4813 addl(r_array_index, 1);
4814
4815 jmp(L_again);
4816 }
4817
4818 { // Degenerate case: more than 64 secondary supers.
4819 // FIXME: We could do something smarter here, maybe a vectorized
4820 // comparison or a binary search, but is that worth any added
4821 // complexity?
4822 bind(L_huge);
4823 xorl(r_array_index, r_array_index); // = 0
4824 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
4825 L_success,
4826 (&L_fallthrough != L_failure ? L_failure : nullptr));
4827
4828 bind(L_fallthrough);
4829 }
4830 }
4831
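// Argument record built on the stack by verify_secondary_supers_table before it calls
// the failure handler below; the last value pushed becomes the first field.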
4832 struct VerifyHelperArguments {
4833 Klass* _super;
4834 Klass* _sub;
4835 intptr_t _linear_result;
4836 intptr_t _table_result;
4837 };
4838
4839 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
4840 Klass::on_secondary_supers_verification_failure(args->_super,
4841 args->_sub,
4842 args->_linear_result,
4843 args->_table_result,
4844 msg);
4845 }
4846
4847 // Make sure that the hashed lookup and a linear scan agree.
4848 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
4849 Register r_super_klass,
4850 Register result,
4851 Register temp1,
4852 Register temp2,
4853 Register temp3) {
4854 const Register
4855 r_array_index = temp1,
4856 r_array_length = temp2,
4857 r_array_base = temp3,
4858 r_bitmap = noreg;
4859
4860 BLOCK_COMMENT("verify_secondary_supers_table {");
4861
4862 Label L_success, L_failure, L_check, L_done;
4863
4864 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
4865 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
4866 // And adjust the array base to point to the data.
4867 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
4868
4869 testl(r_array_length, r_array_length); // array_length == 0?
4870 jcc(Assembler::zero, L_failure);
4871
4872 movl(r_array_index, 0);
4873 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
4874 // fall through to L_failure
4875
4876 const Register linear_result = r_array_index; // reuse temp1
4877
4878 bind(L_failure); // not present
4879 movl(linear_result, 1);
4880 jmp(L_check);
4881
4882 bind(L_success); // present
4883 movl(linear_result, 0);
4884
4885 bind(L_check);
4886 cmpl(linear_result, result);
4887 jcc(Assembler::equal, L_done);
4888
4889 { // To avoid calling convention issues, build a record on the stack
4890 // and pass the pointer to that instead.
4891 push(result);
4892 push(linear_result);
4893 push(r_sub_klass);
4894 push(r_super_klass);
4895 movptr(c_rarg1, rsp);
4896 movptr(c_rarg0, (uintptr_t) "mismatch");
4897 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
4898 should_not_reach_here();
4899 }
4900 bind(L_done);
4901
4902 BLOCK_COMMENT("} verify_secondary_supers_table");
4903 }
4904
4905 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
4906
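// Class initialization barrier: branch to L_fast_path if klass is fully initialized or
// is being initialized by the current thread, otherwise branch to L_slow_path.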
4907 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
4908 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
4909
4910 Label L_fallthrough;
4911 if (L_fast_path == nullptr) {
4912 L_fast_path = &L_fallthrough;
4913 } else if (L_slow_path == nullptr) {
4914 L_slow_path = &L_fallthrough;
4915 }
4916
4917 // Fast path check: class is fully initialized.
4918 // init_state needs acquire, but x86 is TSO, and so we are already good.
4919 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4920 jcc(Assembler::equal, *L_fast_path);
4921
4922 // Fast path check: current thread is initializer thread
4923 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
4924 if (L_slow_path == &L_fallthrough) {
4925 jcc(Assembler::equal, *L_fast_path);
4926 bind(*L_slow_path);
4927 } else if (L_fast_path == &L_fallthrough) {
4928 jcc(Assembler::notEqual, *L_slow_path);
4929 bind(*L_fast_path);
4930 } else {
4931 Unimplemented();
4932 }
4933 }
4934
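// cmov32 variants: emit a real conditional move when the CPU supports it, otherwise
// fall back to a short branch around a plain move.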
4935 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
4936 if (VM_Version::supports_cmov()) {
4937 cmovl(cc, dst, src);
4938 } else {
4939 Label L;
4940 jccb(negate_condition(cc), L);
4941 movl(dst, src);
4942 bind(L);
4943 }
4944 }
4945
4946 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
4947 if (VM_Version::supports_cmov()) {
4948 cmovl(cc, dst, src);
4949 } else {
4950 Label L;
4951 jccb(negate_condition(cc), L);
4952 movl(dst, src);
4953 bind(L);
4954 }
4955 }
4956
4957 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
4958 if (!VerifyOops || VerifyAdapterSharing) {
4959 // The address of the code string below confuses VerifyAdapterSharing
4960 // because it may differ between otherwise equivalent adapters.
4961 return;
4962 }
4963
4964 BLOCK_COMMENT("verify_oop {");
4965 push(rscratch1);
4966 push(rax); // save rax
4967 push(reg); // pass register argument
4968
4969 // Pass register number to verify_oop_subroutine
4970 const char* b = nullptr;
4971 {
4972 ResourceMark rm;
4973 stringStream ss;
4974 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
4975 b = code_string(ss.as_string());
4976 }
4977 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
4978 pushptr(buffer.addr(), rscratch1);
4979
4980 // call indirectly to solve generation ordering problem
4981 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
4982 call(rax);
4983 // Caller pops the arguments (oop, message) and restores rax, r10
4984 BLOCK_COMMENT("} verify_oop");
4985 }
4986
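// Set every bit of dst to 1, using the widest idiom available for the requested vector length.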
4987 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
4988 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
4989 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without
4990 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog
4991 vpternlogd(dst, 0xFF, dst, dst, vector_len);
4992 } else if (VM_Version::supports_avx()) {
4993 vpcmpeqd(dst, dst, dst, vector_len);
4994 } else {
4995 pcmpeqd(dst, dst);
4996 }
4997 }
4998
4999 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
5000 int extra_slot_offset) {
5001 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5002 int stackElementSize = Interpreter::stackElementSize;
5003 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5004 #ifdef ASSERT
5005 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5006 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5007 #endif
5008 Register scale_reg = noreg;
5009 Address::ScaleFactor scale_factor = Address::no_scale;
5010 if (arg_slot.is_constant()) {
5011 offset += arg_slot.as_constant() * stackElementSize;
5012 } else {
5013 scale_reg = arg_slot.as_register();
5014 scale_factor = Address::times(stackElementSize);
5015 }
5016 offset += wordSize; // return PC is on stack
5017 return Address(rsp, scale_reg, scale_factor, offset);
5018 }
5019
5020 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5021 if (!VerifyOops || VerifyAdapterSharing) {
5022 // The address of the code string below confuses VerifyAdapterSharing
5023 // because it may differ between otherwise equivalent adapters.
5024 return;
5025 }
5026
5027 push(rscratch1);
5028 push(rax); // save rax
5029 // addr may contain rsp so we will have to adjust it based on the pushes
5030 // we just did (on 64 bit we do two pushes).
5031 // NOTE: the 64-bit code seems to have had a bug here: it did movq(addr, rax),
5032 // which stores rax into addr, the reverse of what was intended.
5033 if (addr.uses(rsp)) {
5034 lea(rax, addr);
5035 pushptr(Address(rax, 2 * BytesPerWord));
5036 } else {
5037 pushptr(addr);
5038 }
5039
5040 // Pass register number to verify_oop_subroutine
5041 const char* b = nullptr;
5042 {
5043 ResourceMark rm;
5044 stringStream ss;
5045 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
5046 b = code_string(ss.as_string());
5047 }
5048 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5049 pushptr(buffer.addr(), rscratch1);
5050
5051 // call indirectly to solve generation ordering problem
5052 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5053 call(rax);
5054 // Caller pops the arguments (addr, message) and restores rax, r10.
5055 }
5056
5057 void MacroAssembler::verify_tlab() {
5058 #ifdef ASSERT
5059 if (UseTLAB && VerifyOops) {
5060 Label next, ok;
5061 Register t1 = rsi;
5062
5063 push(t1);
5064
5065 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5066 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
5067 jcc(Assembler::aboveEqual, next);
5068 STOP("assert(top >= start)");
5069 should_not_reach_here();
5070
5071 bind(next);
5072 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
5073 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
5074 jcc(Assembler::aboveEqual, ok);
5075 STOP("assert(top <= end)");
5076 should_not_reach_here();
5077
5078 bind(ok);
5079 pop(t1);
5080 }
5081 #endif
5082 }
5083
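// Debug-only helpers for print_CPU_state() below: they decode and print the FPU and
// integer register state that push_CPU_state() lays out on the stack.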
5084 class ControlWord {
5085 public:
5086 int32_t _value;
5087
5088 int rounding_control() const { return (_value >> 10) & 3 ; }
5089 int precision_control() const { return (_value >> 8) & 3 ; }
5090 bool precision() const { return ((_value >> 5) & 1) != 0; }
5091 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5092 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5093 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5094 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5095 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5096
5097 void print() const {
5098 // rounding control
5099 const char* rc;
5100 switch (rounding_control()) {
5101 case 0: rc = "round near"; break;
5102 case 1: rc = "round down"; break;
5103 case 2: rc = "round up "; break;
5104 case 3: rc = "chop "; break;
5105 default:
5106 rc = nullptr; // silence compiler warnings
5107 fatal("Unknown rounding control: %d", rounding_control());
5108 };
5109 // precision control
5110 const char* pc;
5111 switch (precision_control()) {
5112 case 0: pc = "24 bits "; break;
5113 case 1: pc = "reserved"; break;
5114 case 2: pc = "53 bits "; break;
5115 case 3: pc = "64 bits "; break;
5116 default:
5117 pc = nullptr; // silence compiler warnings
5118 fatal("Unknown precision control: %d", precision_control());
5119 };
5120 // flags
5121 char f[9];
5122 f[0] = ' ';
5123 f[1] = ' ';
5124 f[2] = (precision ()) ? 'P' : 'p';
5125 f[3] = (underflow ()) ? 'U' : 'u';
5126 f[4] = (overflow ()) ? 'O' : 'o';
5127 f[5] = (zero_divide ()) ? 'Z' : 'z';
5128 f[6] = (denormalized()) ? 'D' : 'd';
5129 f[7] = (invalid ()) ? 'I' : 'i';
5130 f[8] = '\x0';
5131 // output
5132 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5133 }
5134
5135 };
5136
5137 class StatusWord {
5138 public:
5139 int32_t _value;
5140
5141 bool busy() const { return ((_value >> 15) & 1) != 0; }
5142 bool C3() const { return ((_value >> 14) & 1) != 0; }
5143 bool C2() const { return ((_value >> 10) & 1) != 0; }
5144 bool C1() const { return ((_value >> 9) & 1) != 0; }
5145 bool C0() const { return ((_value >> 8) & 1) != 0; }
5146 int top() const { return (_value >> 11) & 7 ; }
5147 bool error_status() const { return ((_value >> 7) & 1) != 0; }
5148 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
5149 bool precision() const { return ((_value >> 5) & 1) != 0; }
5150 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5151 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5152 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5153 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5154 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5155
5156 void print() const {
5157 // condition codes
5158 char c[5];
5159 c[0] = (C3()) ? '3' : '-';
5160 c[1] = (C2()) ? '2' : '-';
5161 c[2] = (C1()) ? '1' : '-';
5162 c[3] = (C0()) ? '0' : '-';
5163 c[4] = '\x0';
5164 // flags
5165 char f[9];
5166 f[0] = (error_status()) ? 'E' : '-';
5167 f[1] = (stack_fault ()) ? 'S' : '-';
5168 f[2] = (precision ()) ? 'P' : '-';
5169 f[3] = (underflow ()) ? 'U' : '-';
5170 f[4] = (overflow ()) ? 'O' : '-';
5171 f[5] = (zero_divide ()) ? 'Z' : '-';
5172 f[6] = (denormalized()) ? 'D' : '-';
5173 f[7] = (invalid ()) ? 'I' : '-';
5174 f[8] = '\x0';
5175 // output
5176 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
5177 }
5178
5179 };
5180
5181 class TagWord {
5182 public:
5183 int32_t _value;
5184
5185 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
5186
5187 void print() const {
5188 printf("%04x", _value & 0xFFFF);
5189 }
5190
5191 };
5192
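// One 80-bit x87 register image: the 64-bit significand split across _m0/_m1 and the
// 16-bit sign+exponent word in _ex.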
5193 class FPU_Register {
5194 public:
5195 int32_t _m0;
5196 int32_t _m1;
5197 int16_t _ex;
5198
5199 bool is_indefinite() const {
5200 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5201 }
5202
5203 void print() const {
5204 char sign = (_ex < 0) ? '-' : '+';
5205 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
5206 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
5207 };
5208
5209 };
5210
5211 class FPU_State {
5212 public:
5213 enum {
5214 register_size = 10,
5215 number_of_registers = 8,
5216 register_mask = 7
5217 };
5218
5219 ControlWord _control_word;
5220 StatusWord _status_word;
5221 TagWord _tag_word;
5222 int32_t _error_offset;
5223 int32_t _error_selector;
5224 int32_t _data_offset;
5225 int32_t _data_selector;
5226 int8_t _register[register_size * number_of_registers];
5227
5228 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5229 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
5230
5231 const char* tag_as_string(int tag) const {
5232 switch (tag) {
5233 case 0: return "valid";
5234 case 1: return "zero";
5235 case 2: return "special";
5236 case 3: return "empty";
5237 }
5238 ShouldNotReachHere();
5239 return nullptr;
5240 }
5241
5242 void print() const {
5243 // print computation registers
5244 { int t = _status_word.top();
5245 for (int i = 0; i < number_of_registers; i++) {
5246 int j = (i - t) & register_mask;
5247 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5248 st(j)->print();
5249 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5250 }
5251 }
5252 printf("\n");
5253 // print control registers
5254 printf("ctrl = "); _control_word.print(); printf("\n");
5255 printf("stat = "); _status_word .print(); printf("\n");
5256 printf("tags = "); _tag_word .print(); printf("\n");
5257 }
5258
5259 };
5260
5261 class Flag_Register {
5262 public:
5263 int32_t _value;
5264
5265 bool overflow() const { return ((_value >> 11) & 1) != 0; }
5266 bool direction() const { return ((_value >> 10) & 1) != 0; }
5267 bool sign() const { return ((_value >> 7) & 1) != 0; }
5268 bool zero() const { return ((_value >> 6) & 1) != 0; }
5269 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
5270 bool parity() const { return ((_value >> 2) & 1) != 0; }
5271 bool carry() const { return ((_value >> 0) & 1) != 0; }
5272
5273 void print() const {
5274 // flags
5275 char f[8];
5276 f[0] = (overflow ()) ? 'O' : '-';
5277 f[1] = (direction ()) ? 'D' : '-';
5278 f[2] = (sign ()) ? 'S' : '-';
5279 f[3] = (zero ()) ? 'Z' : '-';
5280 f[4] = (auxiliary_carry()) ? 'A' : '-';
5281 f[5] = (parity ()) ? 'P' : '-';
5282 f[6] = (carry ()) ? 'C' : '-';
5283 f[7] = '\x0';
5284 // output
5285 printf("%08x flags = %s", _value, f);
5286 }
5287
5288 };
5289
5290 class IU_Register {
5291 public:
5292 int32_t _value;
5293
5294 void print() const {
5295 printf("%08x %11d", _value, _value);
5296 }
5297
5298 };
5299
5300 class IU_State {
5301 public:
5302 Flag_Register _eflags;
5303 IU_Register _rdi;
5304 IU_Register _rsi;
5305 IU_Register _rbp;
5306 IU_Register _rsp;
5307 IU_Register _rbx;
5308 IU_Register _rdx;
5309 IU_Register _rcx;
5310 IU_Register _rax;
5311
5312 void print() const {
5313 // computation registers
5314 printf("rax = "); _rax.print(); printf("\n");
5315 printf("rbx = "); _rbx.print(); printf("\n");
5316 printf("rcx = "); _rcx.print(); printf("\n");
5317 printf("rdx = "); _rdx.print(); printf("\n");
5318 printf("rdi = "); _rdi.print(); printf("\n");
5319 printf("rsi = "); _rsi.print(); printf("\n");
5320 printf("rbp = "); _rbp.print(); printf("\n");
5321 printf("rsp = "); _rsp.print(); printf("\n");
5322 printf("\n");
5323 // control registers
5324 printf("flgs = "); _eflags.print(); printf("\n");
5325 }
5326 };
5327
5328
5329 class CPU_State {
5330 public:
5331 FPU_State _fpu_state;
5332 IU_State _iu_state;
5333
5334 void print() const {
5335 printf("--------------------------------------------------\n");
5336 _iu_state .print();
5337 printf("\n");
5338 _fpu_state.print();
5339 printf("--------------------------------------------------\n");
5340 }
5341
5342 };
5343
5344
5345 static void _print_CPU_state(CPU_State* state) {
5346 state->print();
5347 };
5348
5349
5350 void MacroAssembler::print_CPU_state() {
5351 push_CPU_state();
5352 push(rsp); // pass CPU state
5353 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5354 addptr(rsp, wordSize); // discard argument
5355 pop_CPU_state();
5356 }
5357
5358 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
5359 // Either restore the MXCSR register after returning from the JNI Call
5360 // or verify that it wasn't changed (with -Xcheck:jni flag).
5361 if (VM_Version::supports_sse()) {
5362 if (RestoreMXCSROnJNICalls) {
5363 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
5364 } else if (CheckJNICalls) {
5365 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5366 }
5367 }
5368 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5369 vzeroupper();
5370 }
5371
5372 // ((OopHandle)result).resolve();
5373 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
5374 assert_different_registers(result, tmp);
5375
5376 // Only 64 bit platforms support GCs that require a tmp register
5377 // Only IN_HEAP loads require a thread_tmp register
5378 // OopHandle::resolve is an indirection like jobject.
5379 access_load_at(T_OBJECT, IN_NATIVE,
5380 result, Address(result, 0), tmp);
5381 }
5382
5383 // ((WeakHandle)result).resolve();
5384 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
5385 assert_different_registers(rresult, rtmp);
5386 Label resolved;
5387
5388 // A null weak handle resolves to null.
5389 cmpptr(rresult, 0);
5390 jcc(Assembler::equal, resolved);
5391
5392 // Only 64 bit platforms support GCs that require a tmp register
5393 // Only IN_HEAP loads require a thread_tmp register
5394 // WeakHandle::resolve is an indirection like jweak.
5395 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5396 rresult, Address(rresult, 0), rtmp);
5397 bind(resolved);
5398 }
5399
5400 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5401 // get mirror
5402 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5403 load_method_holder(mirror, method);
5404 movptr(mirror, Address(mirror, mirror_offset));
5405 resolve_oop_handle(mirror, tmp);
5406 }
5407
5408 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5409 load_method_holder(rresult, rmethod);
5410 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5411 }
5412
5413 void MacroAssembler::load_method_holder(Register holder, Register method) {
5414 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
5415 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5416 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5417 }
5418
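// Load the klass field of src into dst: a narrow klass id when compact object headers
// or compressed class pointers are in use, a full Klass* otherwise.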
5419 void MacroAssembler::load_metadata(Register dst, Register src) {
5420 if (UseCompactObjectHeaders) {
5421 load_narrow_klass_compact(dst, src);
5422 } else if (UseCompressedClassPointers) {
5423 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5424 } else {
5425 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5426 }
5427 }
5428
5429 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5430 assert(UseCompactObjectHeaders, "expect compact object headers");
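// With compact headers the narrow klass id is stored in the upper bits of the object's
// mark word, so load the mark word and shift the id down.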
5431 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5432 shrq(dst, markWord::klass_shift);
5433 }
5434
5435 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5436 assert_different_registers(src, tmp);
5437 assert_different_registers(dst, tmp);
5438
5439 if (UseCompactObjectHeaders) {
5440 load_narrow_klass_compact(dst, src);
5441 decode_klass_not_null(dst, tmp);
5442 } else if (UseCompressedClassPointers) {
5443 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5444 decode_klass_not_null(dst, tmp);
5445 } else {
5446 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5447 }
5448 }
5449
5450 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
5451 load_klass(dst, src, tmp);
5452 movptr(dst, Address(dst, Klass::prototype_header_offset()));
5453 }
5454
5455 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5456 assert(!UseCompactObjectHeaders, "not with compact headers");
5457 assert_different_registers(src, tmp);
5458 assert_different_registers(dst, tmp);
5459 if (UseCompressedClassPointers) {
5460 encode_klass_not_null(src, tmp);
5461 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5462 } else {
5463 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5464 }
5465 }
5466
5467 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
5468 if (UseCompactObjectHeaders) {
5469 assert(tmp != noreg, "need tmp");
5470 assert_different_registers(klass, obj, tmp);
5471 load_narrow_klass_compact(tmp, obj);
5472 cmpl(klass, tmp);
5473 } else if (UseCompressedClassPointers) {
5474 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5475 } else {
5476 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5477 }
5478 }
5479
5480 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5481 if (UseCompactObjectHeaders) {
5482 assert(tmp2 != noreg, "need tmp2");
5483 assert_different_registers(obj1, obj2, tmp1, tmp2);
5484 load_narrow_klass_compact(tmp1, obj1);
5485 load_narrow_klass_compact(tmp2, obj2);
5486 cmpl(tmp1, tmp2);
5487 } else if (UseCompressedClassPointers) {
5488 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5489 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
5490 } else {
5491 movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5492 cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
5493 }
5494 }
5495
5496 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5497 Register tmp1) {
5498 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5499 decorators = AccessInternal::decorator_fixup(decorators, type);
5500 bool as_raw = (decorators & AS_RAW) != 0;
5501 if (as_raw) {
5502 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
5503 } else {
5504 bs->load_at(this, decorators, type, dst, src, tmp1);
5505 }
5506 }
5507
5508 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5509 Register tmp1, Register tmp2, Register tmp3) {
5510 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5511 decorators = AccessInternal::decorator_fixup(decorators, type);
5512 bool as_raw = (decorators & AS_RAW) != 0;
5513 if (as_raw) {
5514 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5515 } else {
5516 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5517 }
5518 }
5519
5520 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5521 Register inline_layout_info) {
5522 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5523 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5524 }
5525
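// Load into offset the offset of the flattened payload within instances of inline_klass.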
5526 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5527 movptr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
5528 movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
5529 }
5530
5531 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
5532 // ((address) (void*) o) + vk->payload_offset();
5533 Register offset = (data == oop) ? rscratch1 : data;
5534 payload_offset(inline_klass, offset);
5535 if (data == oop) {
5536 addptr(data, offset);
5537 } else {
5538 lea(data, Address(oop, offset));
5539 }
5540 }
5541
5542 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5543 Register index, Register data) {
5544 assert(index != rcx, "index needs to shift by rcx");
5545 assert_different_registers(array, array_klass, index);
5546 assert_different_registers(rcx, array, index);
5547
5548 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5549 movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
5550
5551 // Klass::layout_helper_log2_element_size(lh)
5552 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5553 shrl(rcx, Klass::_lh_log2_element_size_shift);
5554 andl(rcx, Klass::_lh_log2_element_size_mask);
5555 shlptr(index); // index << rcx
5556
5557 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
5558 }
5559
5560 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5561 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
5562 }
5563
5564 // Doesn't do verification, generates fixed size code
5565 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
5566 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
5567 }
5568
5569 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5570 Register tmp2, Register tmp3, DecoratorSet decorators) {
5571 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5572 }
5573
5574 // Used for storing nulls.
5575 void MacroAssembler::store_heap_oop_null(Address dst) {
5576 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5577 }
5578
5579 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5580 assert(!UseCompactObjectHeaders, "Don't use with compact headers");
5581 if (UseCompressedClassPointers) {
5582 // Store to klass gap in destination
5583 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5584 }
5585 }
5586
5587 #ifdef ASSERT
5588 void MacroAssembler::verify_heapbase(const char* msg) {
5589 assert (UseCompressedOops, "should be compressed");
5590 assert (Universe::heap() != nullptr, "java heap should be initialized");
5591 if (CheckCompressedOops) {
5592 Label ok;
5593 ExternalAddress src2(CompressedOops::base_addr());
5594 const bool is_src2_reachable = reachable(src2);
5595 if (!is_src2_reachable) {
5596 push(rscratch1); // cmpptr trashes rscratch1
5597 }
5598 cmpptr(r12_heapbase, src2, rscratch1);
5599 jcc(Assembler::equal, ok);
5600 STOP(msg);
5601 bind(ok);
5602 if (!is_src2_reachable) {
5603 pop(rscratch1);
5604 }
5605 }
5606 }
5607 #endif
5608
5609 // Algorithm must match oop.inline.hpp encode_heap_oop.
5610 void MacroAssembler::encode_heap_oop(Register r) {
5611 #ifdef ASSERT
5612 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5613 #endif
5614 verify_oop_msg(r, "broken oop in encode_heap_oop");
5615 if (CompressedOops::base() == nullptr) {
5616 if (CompressedOops::shift() != 0) {
5617 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5618 shrq(r, LogMinObjAlignmentInBytes);
5619 }
5620 return;
5621 }
5622 testq(r, r);
5623 cmovq(Assembler::equal, r, r12_heapbase);
5624 subq(r, r12_heapbase);
5625 shrq(r, LogMinObjAlignmentInBytes);
5626 }
5627
5628 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5629 #ifdef ASSERT
5630 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5631 if (CheckCompressedOops) {
5632 Label ok;
5633 testq(r, r);
5634 jcc(Assembler::notEqual, ok);
5635 STOP("null oop passed to encode_heap_oop_not_null");
5636 bind(ok);
5637 }
5638 #endif
5639 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5640 if (CompressedOops::base() != nullptr) {
5641 subq(r, r12_heapbase);
5642 }
5643 if (CompressedOops::shift() != 0) {
5644 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5645 shrq(r, LogMinObjAlignmentInBytes);
5646 }
5647 }
5648
5649 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5650 #ifdef ASSERT
5651 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5652 if (CheckCompressedOops) {
5653 Label ok;
5654 testq(src, src);
5655 jcc(Assembler::notEqual, ok);
5656 STOP("null oop passed to encode_heap_oop_not_null2");
5657 bind(ok);
5658 }
5659 #endif
5660 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5661 if (dst != src) {
5662 movq(dst, src);
5663 }
5664 if (CompressedOops::base() != nullptr) {
5665 subq(dst, r12_heapbase);
5666 }
5667 if (CompressedOops::shift() != 0) {
5668 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5669 shrq(dst, LogMinObjAlignmentInBytes);
5670 }
5671 }
5672
5673 void MacroAssembler::decode_heap_oop(Register r) {
5674 #ifdef ASSERT
5675 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5676 #endif
5677 if (CompressedOops::base() == nullptr) {
5678 if (CompressedOops::shift() != 0) {
5679 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5680 shlq(r, LogMinObjAlignmentInBytes);
5681 }
5682 } else {
5683 Label done;
5684 shlq(r, LogMinObjAlignmentInBytes);
5685 jccb(Assembler::equal, done);
5686 addq(r, r12_heapbase);
5687 bind(done);
5688 }
5689 verify_oop_msg(r, "broken oop in decode_heap_oop");
5690 }
5691
5692 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5693 // Note: it will change flags
5694 assert (UseCompressedOops, "should only be used for compressed headers");
5695 assert (Universe::heap() != nullptr, "java heap should be initialized");
5696 // Cannot assert, unverified entry point counts instructions (see .ad file)
5697 // vtableStubs also counts instructions in pd_code_size_limit.
5698 // Also do not verify_oop as this is called by verify_oop.
5699 if (CompressedOops::shift() != 0) {
5700 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5701 shlq(r, LogMinObjAlignmentInBytes);
5702 if (CompressedOops::base() != nullptr) {
5703 addq(r, r12_heapbase);
5704 }
5705 } else {
5706 assert (CompressedOops::base() == nullptr, "sanity");
5707 }
5708 }
5709
5710 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5711 // Note: it will change flags
5712 assert (UseCompressedOops, "should only be used for compressed headers");
5713 assert (Universe::heap() != nullptr, "java heap should be initialized");
5714 // Cannot assert, unverified entry point counts instructions (see .ad file)
5715 // vtableStubs also counts instructions in pd_code_size_limit.
5716 // Also do not verify_oop as this is called by verify_oop.
5717 if (CompressedOops::shift() != 0) {
5718 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5719 if (LogMinObjAlignmentInBytes == Address::times_8) {
5720 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5721 } else {
5722 if (dst != src) {
5723 movq(dst, src);
5724 }
5725 shlq(dst, LogMinObjAlignmentInBytes);
5726 if (CompressedOops::base() != nullptr) {
5727 addq(dst, r12_heapbase);
5728 }
5729 }
5730 } else {
5731 assert (CompressedOops::base() == nullptr, "sanity");
5732 if (dst != src) {
5733 movq(dst, src);
5734 }
5735 }
5736 }
5737
5738 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5739 BLOCK_COMMENT("encode_klass_not_null {");
5740 assert_different_registers(r, tmp);
5741 if (CompressedKlassPointers::base() != nullptr) {
5742 if (AOTCodeCache::is_on_for_dump()) {
5743 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5744 } else {
5745 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5746 }
5747 subq(r, tmp);
5748 }
5749 if (CompressedKlassPointers::shift() != 0) {
5750 shrq(r, CompressedKlassPointers::shift());
5751 }
5752 BLOCK_COMMENT("} encode_klass_not_null");
5753 }
5754
5755 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5756 BLOCK_COMMENT("encode_and_move_klass_not_null {");
5757 assert_different_registers(src, dst);
5758 if (CompressedKlassPointers::base() != nullptr) {
5759 movptr(dst, -(intptr_t)CompressedKlassPointers::base());
5760 addq(dst, src);
5761 } else {
5762 movptr(dst, src);
5763 }
5764 if (CompressedKlassPointers::shift() != 0) {
5765 shrq(dst, CompressedKlassPointers::shift());
5766 }
5767 BLOCK_COMMENT("} encode_and_move_klass_not_null");
5768 }
5769
5770 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5771 BLOCK_COMMENT("decode_klass_not_null {");
5772 assert_different_registers(r, tmp);
5773 // Note: it will change flags
5774 assert(UseCompressedClassPointers, "should only be used for compressed headers");
5775 // Cannot assert, unverified entry point counts instructions (see .ad file)
5776 // vtableStubs also counts instructions in pd_code_size_limit.
5777 // Also do not verify_oop as this is called by verify_oop.
5778 if (CompressedKlassPointers::shift() != 0) {
5779 shlq(r, CompressedKlassPointers::shift());
5780 }
5781 if (CompressedKlassPointers::base() != nullptr) {
5782 if (AOTCodeCache::is_on_for_dump()) {
5783 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5784 } else {
5785 movptr(tmp, (intptr_t)CompressedKlassPointers::base());
5786 }
5787 addq(r, tmp);
5788 }
5789 BLOCK_COMMENT("} decode_klass_not_null");
5790 }
5791
5792 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5793 BLOCK_COMMENT("decode_and_move_klass_not_null {");
5794 assert_different_registers(src, dst);
5795 // Note: it will change flags
5796 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5797 // Cannot assert, unverified entry point counts instructions (see .ad file)
5798 // vtableStubs also counts instructions in pd_code_size_limit.
5799 // Also do not verify_oop as this is called by verify_oop.
5800
5801 if (CompressedKlassPointers::base() == nullptr &&
5802 CompressedKlassPointers::shift() == 0) {
5803 // The best case scenario is that there is no base or shift. Then it is already
5804 // a pointer that needs nothing but a register rename.
5805 movl(dst, src);
5806 } else {
5807 if (CompressedKlassPointers::shift() <= Address::times_8) {
5808 if (CompressedKlassPointers::base() != nullptr) {
5809 movptr(dst, (intptr_t)CompressedKlassPointers::base());
5810 } else {
5811 xorq(dst, dst);
5812 }
5813 if (CompressedKlassPointers::shift() != 0) {
5814 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
5815 leaq(dst, Address(dst, src, Address::times_8, 0));
5816 } else {
5817 addq(dst, src);
5818 }
5819 } else {
5820 if (CompressedKlassPointers::base() != nullptr) {
5821 const intptr_t base_right_shifted =
5822 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
5823 movptr(dst, base_right_shifted);
5824 } else {
5825 xorq(dst, dst);
5826 }
5827 addq(dst, src);
5828 shlq(dst, CompressedKlassPointers::shift());
5829 }
5830 }
5831 BLOCK_COMMENT("} decode_and_move_klass_not_null");
5832 }
5833
5834 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5835 assert (UseCompressedOops, "should only be used for compressed headers");
5836 assert (Universe::heap() != nullptr, "java heap should be initialized");
5837 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5838 int oop_index = oop_recorder()->find_index(obj);
5839 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5840 mov_narrow_oop(dst, oop_index, rspec);
5841 }
5842
5843 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5844 assert (UseCompressedOops, "should only be used for compressed headers");
5845 assert (Universe::heap() != nullptr, "java heap should be initialized");
5846 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5847 int oop_index = oop_recorder()->find_index(obj);
5848 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5849 mov_narrow_oop(dst, oop_index, rspec);
5850 }
5851
5852 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5853 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5854 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5855 int klass_index = oop_recorder()->find_index(k);
5856 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5857 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
5858 }
5859
5860 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
5861 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5862 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5863 int klass_index = oop_recorder()->find_index(k);
5864 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5865 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
5866 }
5867
5868 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
5869 assert (UseCompressedOops, "should only be used for compressed headers");
5870 assert (Universe::heap() != nullptr, "java heap should be initialized");
5871 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5872 int oop_index = oop_recorder()->find_index(obj);
5873 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5874 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
5875 }
5876
5877 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
5878 assert (UseCompressedOops, "should only be used for compressed headers");
5879 assert (Universe::heap() != nullptr, "java heap should be initialized");
5880 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5881 int oop_index = oop_recorder()->find_index(obj);
5882 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5883 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
5884 }
5885
5886 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
5887 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5888 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5889 int klass_index = oop_recorder()->find_index(k);
5890 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5891 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
5892 }
5893
5894 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
5895 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5896 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5897 int klass_index = oop_recorder()->find_index(k);
5898 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5899 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
5900 }
5901
5902 void MacroAssembler::reinit_heapbase() {
5903 if (UseCompressedOops) {
5904 if (Universe::heap() != nullptr) {
5905 if (CompressedOops::base() == nullptr) {
5906 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
5907 } else {
5908 mov64(r12_heapbase, (int64_t)CompressedOops::base());
5909 }
5910 } else {
5911 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
5912 }
5913 }
5914 }
5915
5916 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
5917 assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
5918 // An inline type might be returned. If fields are in registers we
5919 // need to allocate an inline type instance and initialize it with
5920 // the value of the fields.
5921 Label skip;
5922 // We only need a new buffered inline type if a new one is not returned
5923 testptr(rax, 1);
5924 jcc(Assembler::zero, skip);
5925 int call_offset = -1;
5926
5927 #ifdef _LP64
5928 // The following code is similar to allocate_instance but has some slight differences,
5929 // e.g. object size is always not zero, sometimes it's constant; storing klass ptr after
5930 // allocating is not necessary if vk != nullptr, etc. allocate_instance is not aware of these.
5931 Label slow_case;
5932 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
5933 mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
5934 if (vk != nullptr) {
5935 // Called from C1, where the return type is statically known.
5936 movptr(rbx, (intptr_t)vk->get_InlineKlass());
5937 jint lh = vk->layout_helper();
5938 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
5939 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
5940 tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
5941 } else {
5942 jmp(slow_case);
5943 }
5944 } else {
5945 // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
5946 mov(rbx, rax);
5947 andptr(rbx, -2);
5948 if (UseTLAB) {
5949 movl(r14, Address(rbx, Klass::layout_helper_offset()));
5950 testl(r14, Klass::_lh_instance_slow_path_bit);
5951 jcc(Assembler::notZero, slow_case);
5952 tlab_allocate(rax, r14, 0, r13, r14, slow_case);
5953 } else {
5954 jmp(slow_case);
5955 }
5956 }
5957 if (UseTLAB) {
5958 // 2. Initialize buffered inline instance header
5959 Register buffer_obj = rax;
5960 Register klass = rbx;
5961 if (UseCompactObjectHeaders) {
5962 Register mark_word = r13;
5963 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
5964 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
5965 } else {
5966 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
5967 xorl(r13, r13);
5968 store_klass_gap(buffer_obj, r13);
5969 if (vk == nullptr) {
5970 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
5971 mov(r13, klass);
5972 }
5973 store_klass(buffer_obj, klass, rscratch1);
5974 klass = r13;
5975 }
5976 // 3. Initialize its fields with an inline class specific handler
5977 if (vk != nullptr) {
5978 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
5979 } else {
5980 movptr(rbx, Address(klass, InlineKlass::adr_members_offset()));
5981 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
5982 call(rbx);
5983 }
5984 jmp(skip);
5985 }
5986 bind(slow_case);
5987 // We failed to allocate a new inline type, fall back to a runtime
5988 // call. Some oop field may be live in some registers but we can't
5989 // tell. That runtime call will take care of preserving them
5990 // across a GC if there's one.
5991 mov(rax, rscratch1);
5992 #endif
5993
5994 if (from_interpreter) {
5995 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
5996 } else {
5997 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
5998 call_offset = offset();
5999 }
6000
6001 bind(skip);
6002 return call_offset;
6003 }
6004
6005 // Move a value between registers/stack slots and update the reg_state
6006 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
6007 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
6008 if (reg_state[to->value()] == reg_written) {
6009 return true; // Already written
6010 }
6011 if (from != to && bt != T_VOID) {
6012 if (reg_state[to->value()] == reg_readonly) {
6013 return false; // Not yet writable
6014 }
6015 if (from->is_reg()) {
6016 if (to->is_reg()) {
6017 if (from->is_XMMRegister()) {
6018 if (bt == T_DOUBLE) {
6019 movdbl(to->as_XMMRegister(), from->as_XMMRegister());
6020 } else {
6021 assert(bt == T_FLOAT, "must be float");
6022 movflt(to->as_XMMRegister(), from->as_XMMRegister());
6023 }
6024 } else {
6025 movq(to->as_Register(), from->as_Register());
6026 }
6027 } else {
6028 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6029 Address to_addr = Address(rsp, st_off);
6030 if (from->is_XMMRegister()) {
6031 if (bt == T_DOUBLE) {
6032 movdbl(to_addr, from->as_XMMRegister());
6033 } else {
6034 assert(bt == T_FLOAT, "must be float");
6035 movflt(to_addr, from->as_XMMRegister());
6036 }
6037 } else {
6038 movq(to_addr, from->as_Register());
6039 }
6040 }
6041 } else {
6042 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
6043 if (to->is_reg()) {
6044 if (to->is_XMMRegister()) {
6045 if (bt == T_DOUBLE) {
6046 movdbl(to->as_XMMRegister(), from_addr);
6047 } else {
6048 assert(bt == T_FLOAT, "must be float");
6049 movflt(to->as_XMMRegister(), from_addr);
6050 }
6051 } else {
6052 movq(to->as_Register(), from_addr);
6053 }
6054 } else {
6055 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6056 movq(r13, from_addr);
6057 movq(Address(rsp, st_off), r13);
6058 }
6059 }
6060 }
6061 // Update register states
6062 reg_state[from->value()] = reg_writable;
6063 reg_state[to->value()] = reg_written;
6064 return true;
6065 }
6066
6067 // Calculate the extra stack space required for packing or unpacking inline
6068 // args and adjust the stack pointer.
6069 //
6070 // This extra stack space takes into account copy #2 of the return address,
6071 // but NOT the saved RBP or the normal size of the frame (see MacroAssembler::remove_frame
6072 // for notations).
6073 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
6074 // Two additional slots to account for return address
6075 int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
6076 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
6077 // Save the return address, adjust the stack (make sure it is properly
6078 // 16-byte aligned) and copy the return address to the new top of the stack.
6079 // The stack will be repaired on return (see MacroAssembler::remove_frame).
6080 assert(sp_inc > 0, "sanity");
6081 pop(r13);
6082 subptr(rsp, sp_inc);
6083 #ifdef ASSERT
6084 // Pre-fill the slots that will hold the return address copy with a poison value;
6085 // the push below then stores the real copy, in debug and product builds alike.
6086 movl(Address(rsp, -VMRegImpl::stack_slot_size), badRegWordVal);
6087 movl(Address(rsp, -2 * VMRegImpl::stack_slot_size), badRegWordVal);
6088 #endif
6089 push(r13);
6090 return sp_inc;
6091 }
6092
6093 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
6094 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
6095 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
6096 RegState reg_state[]) {
6097 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6098 assert(from->is_valid(), "source must be valid");
6099 bool progress = false;
6100 #ifdef ASSERT
6101 const int start_offset = offset();
6102 #endif
6103
6104 Label L_null, L_notNull;
6105 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
6106 Register tmp1 = r10;
6107 Register tmp2 = r13;
6108 Register fromReg = noreg;
6109 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
6110 bool done = true;
6111 bool mark_done = true;
6112 VMReg toReg;
6113 BasicType bt;
6114 // Check if argument requires a null check
6115 bool null_check = false;
6116 VMReg nullCheckReg;
6117 while (stream.next(nullCheckReg, bt)) {
6118 if (sig->at(stream.sig_index())._offset == -1) {
6119 null_check = true;
6120 break;
6121 }
6122 }
6123 stream.reset(sig_index, to_index);
6124 while (stream.next(toReg, bt)) {
6125 assert(toReg->is_valid(), "destination must be valid");
6126 int idx = (int)toReg->value();
6127 if (reg_state[idx] == reg_readonly) {
6128 if (idx != from->value()) {
6129 mark_done = false;
6130 }
6131 done = false;
6132 continue;
6133 } else if (reg_state[idx] == reg_written) {
6134 continue;
6135 }
6136 assert(reg_state[idx] == reg_writable, "must be writable");
6137 reg_state[idx] = reg_written;
6138 progress = true;
6139
6140 if (fromReg == noreg) {
6141 if (from->is_reg()) {
6142 fromReg = from->as_Register();
6143 } else {
6144 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6145 movq(tmp1, Address(rsp, st_off));
6146 fromReg = tmp1;
6147 }
6148 if (null_check) {
6149 // Nullable inline type argument, emit null check
6150 testptr(fromReg, fromReg);
6151 jcc(Assembler::zero, L_null);
6152 }
6153 }
6154 int off = sig->at(stream.sig_index())._offset;
6155 if (off == -1) {
6156 assert(null_check, "Missing null check");
6157 if (toReg->is_stack()) {
6158 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6159 movq(Address(rsp, st_off), 1);
6160 } else {
6161 movq(toReg->as_Register(), 1);
6162 }
6163 continue;
6164 }
6165 assert(off > 0, "offset in object should be positive");
6166 Address fromAddr = Address(fromReg, off);
6167 if (!toReg->is_XMMRegister()) {
6168 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
6169 if (is_reference_type(bt)) {
6170 load_heap_oop(dst, fromAddr);
6171 } else {
6172 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
6173 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
6174 }
6175 if (toReg->is_stack()) {
6176 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6177 movq(Address(rsp, st_off), dst);
6178 }
6179 } else if (bt == T_DOUBLE) {
6180 movdbl(toReg->as_XMMRegister(), fromAddr);
6181 } else {
6182 assert(bt == T_FLOAT, "must be float");
6183 movflt(toReg->as_XMMRegister(), fromAddr);
6184 }
6185 }
6186 if (progress && null_check) {
6187 if (done) {
6188 jmp(L_notNull);
6189 bind(L_null);
6190 // Set null marker to zero to signal that the argument is null.
6191 // Also set all oop fields to zero to make the GC happy.
6192 stream.reset(sig_index, to_index);
6193 while (stream.next(toReg, bt)) {
6194 if (sig->at(stream.sig_index())._offset == -1 ||
6195 bt == T_OBJECT || bt == T_ARRAY) {
6196 if (toReg->is_stack()) {
6197 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6198 movq(Address(rsp, st_off), 0);
6199 } else {
6200 xorq(toReg->as_Register(), toReg->as_Register());
6201 }
6202 }
6203 }
6204 bind(L_notNull);
6205 } else {
6206 bind(L_null);
6207 }
6208 }
6209
6210 sig_index = stream.sig_index();
6211 to_index = stream.regs_index();
6212
6213 if (mark_done && reg_state[from->value()] != reg_written) {
6214 // This is okay because no one else will write to that slot
6215 reg_state[from->value()] = reg_writable;
6216 }
6217 from_index--;
6218 assert(progress || (start_offset == offset()), "should not emit code");
6219 return done;
6220 }
6221
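// Pack the scalarized fields of an inline type argument into a buffered value object
// taken from val_array at vtarg_index, then leave that object in 'to'.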
6222 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
6223 VMRegPair* from, int from_count, int& from_index, VMReg to,
6224 RegState reg_state[], Register val_array) {
6225 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
6226 assert(to->is_valid(), "destination must be valid");
6227
6228 if (reg_state[to->value()] == reg_written) {
6229 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6230 return true; // Already written
6231 }
6232
6233 // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
6234 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
6235 Register val_obj_tmp = r11;
6236 Register from_reg_tmp = r14;
6237 Register tmp1 = r10;
6238 Register tmp2 = r13;
6239 Register tmp3 = rbx;
6240 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
6241
6242 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
6243
6244 if (reg_state[to->value()] == reg_readonly) {
6245 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
6246 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6247 return false; // Not yet writable
6248 }
6249 val_obj = val_obj_tmp;
6250 }
6251
6252 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
6253 load_heap_oop(val_obj, Address(val_array, index));
6254
6255 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
6256 VMReg fromReg;
6257 BasicType bt;
6258 Label L_null;
6259 while (stream.next(fromReg, bt)) {
6260 assert(fromReg->is_valid(), "source must be valid");
6261 reg_state[fromReg->value()] = reg_writable;
6262
6263 int off = sig->at(stream.sig_index())._offset;
6264 if (off == -1) {
6265 // Nullable inline type argument, emit null check
6266 Label L_notNull;
6267 if (fromReg->is_stack()) {
6268 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6269 testb(Address(rsp, ld_off), 1);
6270 } else {
6271 testb(fromReg->as_Register(), 1);
6272 }
6273 jcc(Assembler::notZero, L_notNull);
6274 movptr(val_obj, 0);
6275 jmp(L_null);
6276 bind(L_notNull);
6277 continue;
6278 }
6279
6280 assert(off > 0, "offset in object should be positive");
6281 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
6282
6283 // Pack the scalarized field into the value object.
6284 Address dst(val_obj, off);
6285 if (!fromReg->is_XMMRegister()) {
6286 Register src;
6287 if (fromReg->is_stack()) {
6288 src = from_reg_tmp;
6289 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6290 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
6291 } else {
6292 src = fromReg->as_Register();
6293 }
6294 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
6295 if (is_reference_type(bt)) {
6296 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
6297 } else {
6298 store_sized_value(dst, src, size_in_bytes);
6299 }
6300 } else if (bt == T_DOUBLE) {
6301 movdbl(dst, fromReg->as_XMMRegister());
6302 } else {
6303 assert(bt == T_FLOAT, "must be float");
6304 movflt(dst, fromReg->as_XMMRegister());
6305 }
6306 }
6307 bind(L_null);
6308 sig_index = stream.sig_index();
6309 from_index = stream.regs_index();
6310
6311 assert(reg_state[to->value()] == reg_writable, "must have already been read");
6312 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
6313 assert(success, "to register must be writeable");
6314 return true;
6315 }
6316
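// Returns the scratch register used when an argument register has to be spilled while
// shuffling scalarized inline type arguments: xmm8 for floating point values and r14
// for everything else (see the note about r14 in pack_inline_helper above).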
6317 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6318 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
6319 }
6320
6321 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6322 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6323 if (needs_stack_repair) {
6324 // The method has a scalarized entry point (where fields of value object arguments
6325 // are passed through registers and stack), and a non-scalarized entry point (where
6326 // value object arguments are given as oops). The non-scalarized entry point will
6327 // first load each field of value object arguments and store them in registers and on
6328 // the stack in a way compatible with the scalarized entry point. To do so, some extra
6329 // stack space might be reserved (if argument registers are not enough). On leaving the
6330 // method, this space must be freed.
6331 //
    // In case we used the non-scalarized entry point, the stack looks like this:
6333 //
6334 // | Arguments from caller |
6335 // |---------------------------| <-- caller's SP
6336 // | Return address #1 |
6337 // |---------------------------|
6338 // | Extension space for |
6339 // | inline arg (un)packing |
6340 // |---------------------------|
6341 // | Return address #2 |
6342 // | Saved RBP |
6343 // |---------------------------| <-- start of this method's frame
6344 // | sp_inc |
6345 // | method locals |
6346 // |---------------------------| <-- SP
6347 //
    // There are two copies of the return address on the stack. They are identical at
    // first, but that can change.
    // If the caller has been deoptimized, copy #1 will be patched to point at the
    // deopt blob, while copy #2 will still point into the old method. In short,
    // copy #2 is not reliable and should not be used. It is mostly there to
    // add space between the extension space and the locals, just as there would be
    // between the real arguments and the locals if no unpacking were needed (i.e. when
    // entering through the scalarized entry point).
6356 //
6357 // When leaving, one must use the copy #1 of the return address, while keeping in mind
6358 // that from the scalarized entry point, there will be only one copy. Indeed, in the
6359 // case we used the scalarized calling convention, the stack looks like this:
6360 //
6361 // | Arguments from caller |
6362 // |---------------------------| <-- caller's SP
6363 // | Return address |
6364 // | Saved RBP |
6365 // |---------------------------| <-- start of this method's frame
6366 // | sp_inc |
6367 // | method locals |
6368 // |---------------------------| <-- SP
6369 //
    // The sp_inc stack slot holds the total size of the frame, including the extension
    // space, the possible copy #2 of the return address and the saved RBP (but never
    // copy #1 of the return address). That is how copy #1 of the return address is found.
    // This size is expressed in bytes. Be careful when using it from C++ in pointer
    // arithmetic; you might need to divide it by wordSize.
    //
    // sp_inc can be located because the start of this method's frame is at SP + initial_framesize.
6377
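    // Restore the caller's RBP from the slot just above the start of this method's frame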
6378 movq(rbp, Address(rsp, initial_framesize));
6379 // The stack increment resides just below the saved rbp
6380 addq(rsp, Address(rsp, initial_framesize - wordSize));
6381 } else {
6382 if (initial_framesize > 0) {
6383 addq(rsp, initial_framesize);
6384 }
6385 pop(rbp);
6386 }
6387 }
6388
6389 #if COMPILER2_OR_JVMCI
6390
6391 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
6392 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
6393 // cnt - number of qwords (8-byte words).
6394 // base - start address, qword aligned.
6395 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
6396 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
6397 if (use64byteVector) {
6398 evpbroadcastq(xtmp, val, AVX_512bit);
6399 } else if (MaxVectorSize >= 32) {
6400 movdq(xtmp, val);
6401 punpcklqdq(xtmp, xtmp);
6402 vinserti128_high(xtmp, xtmp);
6403 } else {
6404 movdq(xtmp, val);
6405 punpcklqdq(xtmp, xtmp);
6406 }
6407 jmp(L_zero_64_bytes);
6408
6409 BIND(L_loop);
6410 if (MaxVectorSize >= 32) {
6411 fill64(base, 0, xtmp, use64byteVector);
6412 } else {
6413 movdqu(Address(base, 0), xtmp);
6414 movdqu(Address(base, 16), xtmp);
6415 movdqu(Address(base, 32), xtmp);
6416 movdqu(Address(base, 48), xtmp);
6417 }
6418 addptr(base, 64);
6419
6420 BIND(L_zero_64_bytes);
6421 subptr(cnt, 8);
6422 jccb(Assembler::greaterEqual, L_loop);
6423
  // Fill the trailing bytes (fewer than 64)
6425 if (use64byteVector) {
6426 addptr(cnt, 8);
6427 jccb(Assembler::equal, L_end);
6428 fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
6429 jmp(L_end);
6430 } else {
6431 addptr(cnt, 4);
6432 jccb(Assembler::less, L_tail);
6433 if (MaxVectorSize >= 32) {
6434 vmovdqu(Address(base, 0), xtmp);
6435 } else {
6436 movdqu(Address(base, 0), xtmp);
6437 movdqu(Address(base, 16), xtmp);
6438 }
6439 }
6440 addptr(base, 32);
6441 subptr(cnt, 4);
6442
6443 BIND(L_tail);
6444 addptr(cnt, 4);
6445 jccb(Assembler::lessEqual, L_end);
6446 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
6447 fill32_masked(3, base, 0, xtmp, mask, cnt, val);
6448 } else {
6449 decrement(cnt);
6450
6451 BIND(L_sloop);
6452 movq(Address(base, 0), xtmp);
6453 addptr(base, 8);
6454 decrement(cnt);
6455 jccb(Assembler::greaterEqual, L_sloop);
6456 }
6457 BIND(L_end);
6458 }
6459
6460 // Clearing constant sized memory using YMM/ZMM registers.
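// cnt  - constant number of qwords (8-byte words) to clear.
// base - start address; expected to be qword aligned as in the register-count variant below.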
6461 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
6462 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
6463 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
6464
6465 int vector64_count = (cnt & (~0x7)) >> 3;
6466 cnt = cnt & 0x7;
6467 const int fill64_per_loop = 4;
6468 const int max_unrolled_fill64 = 8;
6469
6470 // 64 byte initialization loop.
6471 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
6472 int start64 = 0;
6473 if (vector64_count > max_unrolled_fill64) {
6474 Label LOOP;
6475 Register index = rtmp;
6476
6477 start64 = vector64_count - (vector64_count % fill64_per_loop);
6478
6479 movl(index, 0);
6480 BIND(LOOP);
6481 for (int i = 0; i < fill64_per_loop; i++) {
6482 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
6483 }
6484 addl(index, fill64_per_loop * 64);
6485 cmpl(index, start64 * 64);
6486 jccb(Assembler::less, LOOP);
6487 }
6488 for (int i = start64; i < vector64_count; i++) {
6489 fill64(base, i * 64, xtmp, use64byteVector);
6490 }
6491
6492 // Clear remaining 64 byte tail.
6493 int disp = vector64_count * 64;
6494 if (cnt) {
6495 switch (cnt) {
6496 case 1:
6497 movq(Address(base, disp), xtmp);
6498 break;
6499 case 2:
6500 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
6501 break;
6502 case 3:
6503 movl(rtmp, 0x7);
6504 kmovwl(mask, rtmp);
6505 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
6506 break;
6507 case 4:
6508 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6509 break;
6510 case 5:
6511 if (use64byteVector) {
6512 movl(rtmp, 0x1F);
6513 kmovwl(mask, rtmp);
6514 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6515 } else {
6516 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6517 movq(Address(base, disp + 32), xtmp);
6518 }
6519 break;
6520 case 6:
6521 if (use64byteVector) {
6522 movl(rtmp, 0x3F);
6523 kmovwl(mask, rtmp);
6524 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6525 } else {
6526 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6527 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
6528 }
6529 break;
6530 case 7:
6531 if (use64byteVector) {
6532 movl(rtmp, 0x7F);
6533 kmovwl(mask, rtmp);
6534 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6535 } else {
6536 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6537 movl(rtmp, 0x7);
6538 kmovwl(mask, rtmp);
6539 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
6540 }
6541 break;
6542 default:
6543 fatal("Unexpected length : %d\n",cnt);
6544 break;
6545 }
6546 }
6547 }
6548
6549 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
6550 bool is_large, bool word_copy_only, KRegister mask) {
6551 // cnt - number of qwords (8-byte words).
6552 // base - start address, qword aligned.
6553 // is_large - if optimizers know cnt is larger than InitArrayShortSize
  assert(base==rdi, "base register must be rdi for rep stos");
  assert(val==rax, "val register must be rax for rep stos");
  assert(cnt==rcx, "cnt register must be rcx for rep stos");
6557 assert(InitArrayShortSize % BytesPerLong == 0,
6558 "InitArrayShortSize should be the multiple of BytesPerLong");
6559
6560 Label DONE;
6561
6562 if (!is_large) {
6563 Label LOOP, LONG;
6564 cmpptr(cnt, InitArrayShortSize/BytesPerLong);
6565 jccb(Assembler::greater, LONG);
6566
6567 decrement(cnt);
6568 jccb(Assembler::negative, DONE); // Zero length
6569
6570 // Use individual pointer-sized stores for small counts:
6571 BIND(LOOP);
6572 movptr(Address(base, cnt, Address::times_ptr), val);
6573 decrement(cnt);
6574 jccb(Assembler::greaterEqual, LOOP);
6575 jmpb(DONE);
6576
6577 BIND(LONG);
6578 }
6579
6580 // Use longer rep-prefixed ops for non-small counts:
6581 if (UseFastStosb && !word_copy_only) {
6582 shlptr(cnt, 3); // convert to number of bytes
6583 rep_stosb();
6584 } else if (UseXMMForObjInit) {
6585 xmm_clear_mem(base, cnt, val, xtmp, mask);
6586 } else {
6587 rep_stos();
6588 }
6589
6590 BIND(DONE);
6591 }
6592
6593 #endif //COMPILER2_OR_JVMCI
6594
6595
6596 void MacroAssembler::generate_fill(BasicType t, bool aligned,
6597 Register to, Register value, Register count,
6598 Register rtmp, XMMRegister xtmp) {
6599 ShortBranchVerifier sbv(this);
6600 assert_different_registers(to, value, count, rtmp);
6601 Label L_exit;
6602 Label L_fill_2_bytes, L_fill_4_bytes;
6603
6604 #if defined(COMPILER2)
  if (MaxVectorSize >= 32 &&
6606 VM_Version::supports_avx512vlbw() &&
6607 VM_Version::supports_bmi2()) {
6608 generate_fill_avx3(t, to, value, count, rtmp, xtmp);
6609 return;
6610 }
6611 #endif
6612
6613 int shift = -1;
6614 switch (t) {
6615 case T_BYTE:
6616 shift = 2;
6617 break;
6618 case T_SHORT:
6619 shift = 1;
6620 break;
6621 case T_INT:
6622 shift = 0;
6623 break;
6624 default: ShouldNotReachHere();
6625 }
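  // 'shift' is log2 of the number of elements that fit in a 4-byte word, so
  // (n << shift) converts a count of 4-byte words into an element count.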
6626
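  // Replicate the fill value across a full 32-bit word so that wider stores write the
  // pattern: a byte is duplicated into all four byte lanes, a short into both 16-bit lanes.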
6627 if (t == T_BYTE) {
6628 andl(value, 0xff);
6629 movl(rtmp, value);
6630 shll(rtmp, 8);
6631 orl(value, rtmp);
6632 }
6633 if (t == T_SHORT) {
6634 andl(value, 0xffff);
6635 }
6636 if (t == T_BYTE || t == T_SHORT) {
6637 movl(rtmp, value);
6638 shll(rtmp, 16);
6639 orl(value, rtmp);
6640 }
6641
6642 cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
6643 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
6644 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
6645 Label L_skip_align2;
6646 // align source address at 4 bytes address boundary
6647 if (t == T_BYTE) {
6648 Label L_skip_align1;
6649 // One byte misalignment happens only for byte arrays
6650 testptr(to, 1);
6651 jccb(Assembler::zero, L_skip_align1);
6652 movb(Address(to, 0), value);
6653 increment(to);
6654 decrement(count);
6655 BIND(L_skip_align1);
6656 }
6657 // Two bytes misalignment happens only for byte and short (char) arrays
6658 testptr(to, 2);
6659 jccb(Assembler::zero, L_skip_align2);
6660 movw(Address(to, 0), value);
6661 addptr(to, 2);
6662 subptr(count, 1<<(shift-1));
6663 BIND(L_skip_align2);
6664 }
6665 {
6666 Label L_fill_32_bytes;
6667 if (!UseUnalignedLoadStores) {
6668 // align to 8 bytes, we know we are 4 byte aligned to start
6669 testptr(to, 4);
6670 jccb(Assembler::zero, L_fill_32_bytes);
6671 movl(Address(to, 0), value);
6672 addptr(to, 4);
6673 subptr(count, 1<<shift);
6674 }
6675 BIND(L_fill_32_bytes);
6676 {
6677 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
6678 movdl(xtmp, value);
6679 if (UseAVX >= 2 && UseUnalignedLoadStores) {
6680 Label L_check_fill_32_bytes;
6681 if (UseAVX > 2) {
6682 // Fill 64-byte chunks
6683 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
6684
6685 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
6686 cmpptr(count, VM_Version::avx3_threshold());
6687 jccb(Assembler::below, L_check_fill_64_bytes_avx2);
6688
6689 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
6690
6691 subptr(count, 16 << shift);
6692 jccb(Assembler::less, L_check_fill_32_bytes);
6693 align(16);
6694
6695 BIND(L_fill_64_bytes_loop_avx3);
6696 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
6697 addptr(to, 64);
6698 subptr(count, 16 << shift);
6699 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
6700 jmpb(L_check_fill_32_bytes);
6701
6702 BIND(L_check_fill_64_bytes_avx2);
6703 }
6704 // Fill 64-byte chunks
6705 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
6706
6707 subptr(count, 16 << shift);
6708 jcc(Assembler::less, L_check_fill_32_bytes);
6709
6710 // align data for 64-byte chunks
6711 Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
6712 if (EnableX86ECoreOpts) {
6713 // align 'big' arrays to cache lines to minimize split_stores
6714 cmpptr(count, 96 << shift);
6715 jcc(Assembler::below, L_fill_64_bytes_loop);
6716
6717 // Find the bytes needed for alignment
6718 movptr(rtmp, to);
6719 andptr(rtmp, 0x1c);
6720 jcc(Assembler::zero, L_fill_64_bytes_loop);
      negptr(rtmp);
      addptr(rtmp, 32);        // rtmp = 32 - (to & 0x1c): bytes needed to reach 32-byte alignment
      shrptr(rtmp, 2 - shift); // convert the byte count into an element count
6724 subptr(count, rtmp); // adjust count by number of elements
6725
6726 align(16);
6727 BIND(L_align_64_bytes_loop);
6728 movdl(Address(to, 0), xtmp);
6729 addptr(to, 4);
6730 subptr(rtmp, 1 << shift);
6731 jcc(Assembler::greater, L_align_64_bytes_loop);
6732 }
6733
6734 align(16);
6735 BIND(L_fill_64_bytes_loop);
6736 vmovdqu(Address(to, 0), xtmp);
6737 vmovdqu(Address(to, 32), xtmp);
6738 addptr(to, 64);
6739 subptr(count, 16 << shift);
6740 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
6741
6742 align(16);
6743 BIND(L_check_fill_32_bytes);
6744 addptr(count, 8 << shift);
6745 jccb(Assembler::less, L_check_fill_8_bytes);
6746 vmovdqu(Address(to, 0), xtmp);
6747 addptr(to, 32);
6748 subptr(count, 8 << shift);
6749
6750 BIND(L_check_fill_8_bytes);
6751 // clean upper bits of YMM registers
6752 movdl(xtmp, value);
6753 pshufd(xtmp, xtmp, 0);
6754 } else {
6755 // Fill 32-byte chunks
6756 pshufd(xtmp, xtmp, 0);
6757
6758 subptr(count, 8 << shift);
6759 jcc(Assembler::less, L_check_fill_8_bytes);
6760 align(16);
6761
6762 BIND(L_fill_32_bytes_loop);
6763
6764 if (UseUnalignedLoadStores) {
6765 movdqu(Address(to, 0), xtmp);
6766 movdqu(Address(to, 16), xtmp);
6767 } else {
6768 movq(Address(to, 0), xtmp);
6769 movq(Address(to, 8), xtmp);
6770 movq(Address(to, 16), xtmp);
6771 movq(Address(to, 24), xtmp);
6772 }
6773
6774 addptr(to, 32);
6775 subptr(count, 8 << shift);
6776 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
6777
6778 BIND(L_check_fill_8_bytes);
6779 }
6780 addptr(count, 8 << shift);
6781 jccb(Assembler::zero, L_exit);
6782 jmpb(L_fill_8_bytes);
6783
6784 //
6785 // length is too short, just fill qwords
6786 //
6787 align(16);
6788 BIND(L_fill_8_bytes_loop);
6789 movq(Address(to, 0), xtmp);
6790 addptr(to, 8);
6791 BIND(L_fill_8_bytes);
6792 subptr(count, 1 << (shift + 1));
6793 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
6794 }
6795 }
6796
6797 Label L_fill_4_bytes_loop;
6798 testl(count, 1 << shift);
6799 jccb(Assembler::zero, L_fill_2_bytes);
6800
6801 align(16);
6802 BIND(L_fill_4_bytes_loop);
6803 movl(Address(to, 0), value);
6804 addptr(to, 4);
6805
6806 BIND(L_fill_4_bytes);
6807 subptr(count, 1 << shift);
6808 jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
6809
6810 if (t == T_BYTE || t == T_SHORT) {
6811 Label L_fill_byte;
6812 BIND(L_fill_2_bytes);
6813 // fill trailing 2 bytes
6814 testl(count, 1<<(shift-1));
6815 jccb(Assembler::zero, L_fill_byte);
6816 movw(Address(to, 0), value);
6817 if (t == T_BYTE) {
6818 addptr(to, 2);
6819 BIND(L_fill_byte);
6820 // fill trailing byte
6821 testl(count, 1);
6822 jccb(Assembler::zero, L_exit);
6823 movb(Address(to, 0), value);
6824 } else {
6825 BIND(L_fill_byte);
6826 }
6827 } else {
6828 BIND(L_fill_2_bytes);
6829 }
6830 BIND(L_exit);
6831 }
6832
6833 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
6834 switch(type) {
6835 case T_BYTE:
6836 case T_BOOLEAN:
6837 evpbroadcastb(dst, src, vector_len);
6838 break;
6839 case T_SHORT:
6840 case T_CHAR:
6841 evpbroadcastw(dst, src, vector_len);
6842 break;
6843 case T_INT:
6844 case T_FLOAT:
6845 evpbroadcastd(dst, src, vector_len);
6846 break;
6847 case T_LONG:
6848 case T_DOUBLE:
6849 evpbroadcastq(dst, src, vector_len);
6850 break;
6851 default:
6852 fatal("Unhandled type : %s", type2name(type));
6853 break;
6854 }
6855 }
6856
6857 // Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
6858 //
6859 // @IntrinsicCandidate
6860 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
6861 // char[] sa, int sp, byte[] da, int dp, int len) {
6862 // int i = 0;
6863 // for (; i < len; i++) {
6864 // char c = sa[sp++];
6865 // if (c > '\u00FF')
6866 // break;
6867 // da[dp++] = (byte) c;
6868 // }
6869 // return i;
6870 // }
6871 //
6872 // @IntrinsicCandidate
6873 // int java.lang.StringCoding.encodeISOArray0(
6874 // byte[] sa, int sp, byte[] da, int dp, int len) {
6875 // int i = 0;
6876 // for (; i < len; i++) {
6877 // char c = StringUTF16.getChar(sa, sp++);
6878 // if (c > '\u00FF')
6879 // break;
6880 // da[dp++] = (byte) c;
6881 // }
6882 // return i;
6883 // }
6884 //
6885 // @IntrinsicCandidate
6886 // int java.lang.StringCoding.encodeAsciiArray0(
6887 // char[] sa, int sp, byte[] da, int dp, int len) {
6888 // int i = 0;
6889 // for (; i < len; i++) {
6890 // char c = sa[sp++];
6891 // if (c >= '\u0080')
6892 // break;
6893 // da[dp++] = (byte) c;
6894 // }
6895 // return i;
6896 // }
6897 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
6898 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
6899 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
6900 Register tmp5, Register result, bool ascii) {
6901
6902 // rsi: src
6903 // rdi: dst
6904 // rdx: len
6905 // rcx: tmp5
6906 // rax: result
6907 ShortBranchVerifier sbv(this);
6908 assert_different_registers(src, dst, len, tmp5, result);
6909 Label L_done, L_copy_1_char, L_copy_1_char_exit;
6910
6911 int mask = ascii ? 0xff80ff80 : 0xff00ff00;
6912 int short_mask = ascii ? 0xff80 : 0xff00;
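  // The masks have the high bits of each 16-bit char lane set: 0xff00/0xff00ff00 flags
  // chars > 0xFF (ISO-8859-1), 0xff80/0xff80ff80 flags chars >= 0x80 (ASCII).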
6913
6914 // set result
6915 xorl(result, result);
6916 // check for zero length
6917 testl(len, len);
6918 jcc(Assembler::zero, L_done);
6919
6920 movl(result, len);
6921
6922 // Setup pointers
6923 lea(src, Address(src, len, Address::times_2)); // char[]
6924 lea(dst, Address(dst, len, Address::times_1)); // byte[]
6925 negptr(len);
6926
6927 if (UseSSE42Intrinsics || UseAVX >= 2) {
6928 Label L_copy_8_chars, L_copy_8_chars_exit;
6929 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
6930
6931 if (UseAVX >= 2) {
6932 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
6933 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
6934 movdl(tmp1Reg, tmp5);
6935 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
6936 jmp(L_chars_32_check);
6937
6938 bind(L_copy_32_chars);
6939 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
6940 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
6941 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
6942 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
6943 jccb(Assembler::notZero, L_copy_32_chars_exit);
6944 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
6945 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
6946 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
6947
6948 bind(L_chars_32_check);
6949 addptr(len, 32);
6950 jcc(Assembler::lessEqual, L_copy_32_chars);
6951
6952 bind(L_copy_32_chars_exit);
6953 subptr(len, 16);
6954 jccb(Assembler::greater, L_copy_16_chars_exit);
6955
6956 } else if (UseSSE42Intrinsics) {
6957 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
6958 movdl(tmp1Reg, tmp5);
6959 pshufd(tmp1Reg, tmp1Reg, 0);
6960 jmpb(L_chars_16_check);
6961 }
6962
6963 bind(L_copy_16_chars);
6964 if (UseAVX >= 2) {
6965 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
6966 vptest(tmp2Reg, tmp1Reg);
6967 jcc(Assembler::notZero, L_copy_16_chars_exit);
6968 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
6969 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
6970 } else {
6971 if (UseAVX > 0) {
6972 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
6973 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
6974 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
6975 } else {
6976 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
6977 por(tmp2Reg, tmp3Reg);
6978 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
6979 por(tmp2Reg, tmp4Reg);
6980 }
6981 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
6982 jccb(Assembler::notZero, L_copy_16_chars_exit);
6983 packuswb(tmp3Reg, tmp4Reg);
6984 }
6985 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
6986
6987 bind(L_chars_16_check);
6988 addptr(len, 16);
6989 jcc(Assembler::lessEqual, L_copy_16_chars);
6990
6991 bind(L_copy_16_chars_exit);
6992 if (UseAVX >= 2) {
6993 // clean upper bits of YMM registers
6994 vpxor(tmp2Reg, tmp2Reg);
6995 vpxor(tmp3Reg, tmp3Reg);
6996 vpxor(tmp4Reg, tmp4Reg);
6997 movdl(tmp1Reg, tmp5);
6998 pshufd(tmp1Reg, tmp1Reg, 0);
6999 }
7000 subptr(len, 8);
7001 jccb(Assembler::greater, L_copy_8_chars_exit);
7002
7003 bind(L_copy_8_chars);
7004 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7005 ptest(tmp3Reg, tmp1Reg);
7006 jccb(Assembler::notZero, L_copy_8_chars_exit);
7007 packuswb(tmp3Reg, tmp1Reg);
7008 movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7009 addptr(len, 8);
7010 jccb(Assembler::lessEqual, L_copy_8_chars);
7011
7012 bind(L_copy_8_chars_exit);
7013 subptr(len, 8);
7014 jccb(Assembler::zero, L_done);
7015 }
7016
7017 bind(L_copy_1_char);
7018 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7019 testl(tmp5, short_mask); // check if Unicode or non-ASCII char
7020 jccb(Assembler::notZero, L_copy_1_char_exit);
7021 movb(Address(dst, len, Address::times_1, 0), tmp5);
7022 addptr(len, 1);
7023 jccb(Assembler::less, L_copy_1_char);
7024
7025 bind(L_copy_1_char_exit);
7026 addptr(result, len); // len is negative count of not processed elements
7027
7028 bind(L_done);
7029 }
7030
7031 /**
7032 * Helper for multiply_to_len().
7033 */
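// Computes dest_hi:dest_lo += src1 + src2, i.e. a 128-bit accumulation of two 64-bit addends.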
7034 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7035 addq(dest_lo, src1);
7036 adcq(dest_hi, 0);
7037 addq(dest_lo, src2);
7038 adcq(dest_hi, 0);
7039 }
7040
7041 /**
7042 * Multiply 64 bit by 64 bit first loop.
7043 */
7044 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7045 Register y, Register y_idx, Register z,
7046 Register carry, Register product,
7047 Register idx, Register kdx) {
7048 //
7049 // jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7051 // huge_128 product = y[idx] * x[xstart] + carry;
7052 // z[kdx] = (jlong)product;
7053 // carry = (jlong)(product >>> 64);
7054 // }
7055 // z[xstart] = carry;
7056 //
7057
7058 Label L_first_loop, L_first_loop_exit;
7059 Label L_one_x, L_one_y, L_multiply;
7060
7061 decrementl(xstart);
7062 jcc(Assembler::negative, L_one_x);
7063
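  // The int arrays store the most significant 32-bit word at the lowest index, so an
  // 8-byte load of two adjacent ints is rotated by 32 bits to form a little-endian jlong.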
7064 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7065 rorq(x_xstart, 32); // convert big-endian to little-endian
7066
7067 bind(L_first_loop);
7068 decrementl(idx);
7069 jcc(Assembler::negative, L_first_loop_exit);
7070 decrementl(idx);
7071 jcc(Assembler::negative, L_one_y);
7072 movq(y_idx, Address(y, idx, Address::times_4, 0));
7073 rorq(y_idx, 32); // convert big-endian to little-endian
7074 bind(L_multiply);
7075 movq(product, x_xstart);
7076 mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7077 addq(product, carry);
7078 adcq(rdx, 0);
7079 subl(kdx, 2);
7080 movl(Address(z, kdx, Address::times_4, 4), product);
7081 shrq(product, 32);
7082 movl(Address(z, kdx, Address::times_4, 0), product);
7083 movq(carry, rdx);
7084 jmp(L_first_loop);
7085
7086 bind(L_one_y);
7087 movl(y_idx, Address(y, 0));
7088 jmp(L_multiply);
7089
7090 bind(L_one_x);
7091 movl(x_xstart, Address(x, 0));
7092 jmp(L_first_loop);
7093
7094 bind(L_first_loop_exit);
7095 }
7096
7097 /**
7098 * Multiply 64 bit by 64 bit and add 128 bit.
7099 */
7100 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7101 Register yz_idx, Register idx,
7102 Register carry, Register product, int offset) {
7103 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7104 // z[kdx] = (jlong)product;
7105
7106 movq(yz_idx, Address(y, idx, Address::times_4, offset));
7107 rorq(yz_idx, 32); // convert big-endian to little-endian
7108 movq(product, x_xstart);
7109 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7110 movq(yz_idx, Address(z, idx, Address::times_4, offset));
7111 rorq(yz_idx, 32); // convert big-endian to little-endian
7112
7113 add2_with_carry(rdx, product, carry, yz_idx);
7114
7115 movl(Address(z, idx, Address::times_4, offset+4), product);
7116 shrq(product, 32);
7117 movl(Address(z, idx, Address::times_4, offset), product);
7118
7119 }
7120
7121 /**
7122 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7123 */
7124 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7125 Register yz_idx, Register idx, Register jdx,
7126 Register carry, Register product,
7127 Register carry2) {
7128 // jlong carry, x[], y[], z[];
7129 // int kdx = ystart+1;
7130 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7131 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7132 // z[kdx+idx+1] = (jlong)product;
7133 // jlong carry2 = (jlong)(product >>> 64);
7134 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7135 // z[kdx+idx] = (jlong)product;
7136 // carry = (jlong)(product >>> 64);
7137 // }
7138 // idx += 2;
7139 // if (idx > 0) {
7140 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7141 // z[kdx+idx] = (jlong)product;
7142 // carry = (jlong)(product >>> 64);
7143 // }
7144 //
7145
7146 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7147
7148 movl(jdx, idx);
7149 andl(jdx, 0xFFFFFFFC);
7150 shrl(jdx, 2);
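  // jdx = number of 4-int (two-jlong) groups processed by the unrolled loop below.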
7151
7152 bind(L_third_loop);
7153 subl(jdx, 1);
7154 jcc(Assembler::negative, L_third_loop_exit);
7155 subl(idx, 4);
7156
7157 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7158 movq(carry2, rdx);
7159
7160 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7161 movq(carry, rdx);
7162 jmp(L_third_loop);
7163
7164 bind (L_third_loop_exit);
7165
7166 andl (idx, 0x3);
7167 jcc(Assembler::zero, L_post_third_loop_done);
7168
7169 Label L_check_1;
7170 subl(idx, 2);
7171 jcc(Assembler::negative, L_check_1);
7172
7173 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7174 movq(carry, rdx);
7175
7176 bind (L_check_1);
7177 addl (idx, 0x2);
7178 andl (idx, 0x1);
7179 subl(idx, 1);
7180 jcc(Assembler::negative, L_post_third_loop_done);
7181
7182 movl(yz_idx, Address(y, idx, Address::times_4, 0));
7183 movq(product, x_xstart);
7184 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7185 movl(yz_idx, Address(z, idx, Address::times_4, 0));
7186
7187 add2_with_carry(rdx, product, yz_idx, carry);
7188
7189 movl(Address(z, idx, Address::times_4, 0), product);
7190 shrq(product, 32);
7191
7192 shlq(rdx, 32);
7193 orq(product, rdx);
7194 movq(carry, product);
7195
7196 bind(L_post_third_loop_done);
7197 }
7198
7199 /**
7200 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7201 *
7202 */
7203 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7204 Register carry, Register carry2,
7205 Register idx, Register jdx,
7206 Register yz_idx1, Register yz_idx2,
7207 Register tmp, Register tmp3, Register tmp4) {
7208 assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7209
7210 // jlong carry, x[], y[], z[];
7211 // int kdx = ystart+1;
7212 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7213 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7214 // jlong carry2 = (jlong)(tmp3 >>> 64);
7215 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
7216 // carry = (jlong)(tmp4 >>> 64);
7217 // z[kdx+idx+1] = (jlong)tmp3;
7218 // z[kdx+idx] = (jlong)tmp4;
7219 // }
7220 // idx += 2;
7221 // if (idx > 0) {
7222 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7223 // z[kdx+idx] = (jlong)yz_idx1;
7224 // carry = (jlong)(yz_idx1 >>> 64);
7225 // }
7226 //
7227
7228 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7229
7230 movl(jdx, idx);
7231 andl(jdx, 0xFFFFFFFC);
7232 shrl(jdx, 2);
7233
7234 bind(L_third_loop);
7235 subl(jdx, 1);
7236 jcc(Assembler::negative, L_third_loop_exit);
7237 subl(idx, 4);
7238
7239 movq(yz_idx1, Address(y, idx, Address::times_4, 8));
7240 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7241 movq(yz_idx2, Address(y, idx, Address::times_4, 0));
7242 rorxq(yz_idx2, yz_idx2, 32);
7243
7244 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7245 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp
7246
7247 movq(yz_idx1, Address(z, idx, Address::times_4, 8));
7248 rorxq(yz_idx1, yz_idx1, 32);
7249 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7250 rorxq(yz_idx2, yz_idx2, 32);
7251
7252 if (VM_Version::supports_adx()) {
7253 adcxq(tmp3, carry);
7254 adoxq(tmp3, yz_idx1);
7255
7256 adcxq(tmp4, tmp);
7257 adoxq(tmp4, yz_idx2);
7258
7259 movl(carry, 0); // does not affect flags
7260 adcxq(carry2, carry);
7261 adoxq(carry2, carry);
7262 } else {
7263 add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7264 add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7265 }
7266 movq(carry, carry2);
7267
7268 movl(Address(z, idx, Address::times_4, 12), tmp3);
7269 shrq(tmp3, 32);
7270 movl(Address(z, idx, Address::times_4, 8), tmp3);
7271
7272 movl(Address(z, idx, Address::times_4, 4), tmp4);
7273 shrq(tmp4, 32);
7274 movl(Address(z, idx, Address::times_4, 0), tmp4);
7275
7276 jmp(L_third_loop);
7277
7278 bind (L_third_loop_exit);
7279
7280 andl (idx, 0x3);
7281 jcc(Assembler::zero, L_post_third_loop_done);
7282
7283 Label L_check_1;
7284 subl(idx, 2);
7285 jcc(Assembler::negative, L_check_1);
7286
7287 movq(yz_idx1, Address(y, idx, Address::times_4, 0));
7288 rorxq(yz_idx1, yz_idx1, 32);
7289 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7290 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7291 rorxq(yz_idx2, yz_idx2, 32);
7292
7293 add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7294
7295 movl(Address(z, idx, Address::times_4, 4), tmp3);
7296 shrq(tmp3, 32);
7297 movl(Address(z, idx, Address::times_4, 0), tmp3);
7298 movq(carry, tmp4);
7299
7300 bind (L_check_1);
7301 addl (idx, 0x2);
7302 andl (idx, 0x1);
7303 subl(idx, 1);
7304 jcc(Assembler::negative, L_post_third_loop_done);
7305 movl(tmp4, Address(y, idx, Address::times_4, 0));
7306 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3
7307 movl(tmp4, Address(z, idx, Address::times_4, 0));
7308
7309 add2_with_carry(carry2, tmp3, tmp4, carry);
7310
7311 movl(Address(z, idx, Address::times_4, 0), tmp3);
7312 shrq(tmp3, 32);
7313
7314 shlq(carry2, 32);
7315 orq(tmp3, carry2);
7316 movq(carry, tmp3);
7317
7318 bind(L_post_third_loop_done);
7319 }
7320
7321 /**
7322 * Code for BigInteger::multiplyToLen() intrinsic.
7323 *
7324 * rdi: x
7325 * rax: xlen
7326 * rsi: y
7327 * rcx: ylen
7328 * r8: z
7329 * r11: tmp0
7330 * r12: tmp1
7331 * r13: tmp2
7332 * r14: tmp3
7333 * r15: tmp4
7334 * rbx: tmp5
7335 *
7336 */
7337 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
7338 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7339 ShortBranchVerifier sbv(this);
7340 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7341
7342 push(tmp0);
7343 push(tmp1);
7344 push(tmp2);
7345 push(tmp3);
7346 push(tmp4);
7347 push(tmp5);
7348
7349 push(xlen);
7350
7351 const Register idx = tmp1;
7352 const Register kdx = tmp2;
7353 const Register xstart = tmp3;
7354
7355 const Register y_idx = tmp4;
7356 const Register carry = tmp5;
7357 const Register product = xlen;
7358 const Register x_xstart = tmp0;
7359
7360 // First Loop.
7361 //
7362 // final static long LONG_MASK = 0xffffffffL;
7363 // int xstart = xlen - 1;
7364 // int ystart = ylen - 1;
7365 // long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7367 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7368 // z[kdx] = (int)product;
7369 // carry = product >>> 32;
7370 // }
7371 // z[xstart] = (int)carry;
7372 //
7373
7374 movl(idx, ylen); // idx = ylen;
7375 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
7376 xorq(carry, carry); // carry = 0;
7377
7378 Label L_done;
7379
7380 movl(xstart, xlen);
7381 decrementl(xstart);
7382 jcc(Assembler::negative, L_done);
7383
7384 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7385
7386 Label L_second_loop;
7387 testl(kdx, kdx);
7388 jcc(Assembler::zero, L_second_loop);
7389
7390 Label L_carry;
7391 subl(kdx, 1);
7392 jcc(Assembler::zero, L_carry);
7393
7394 movl(Address(z, kdx, Address::times_4, 0), carry);
7395 shrq(carry, 32);
7396 subl(kdx, 1);
7397
7398 bind(L_carry);
7399 movl(Address(z, kdx, Address::times_4, 0), carry);
7400
7401 // Second and third (nested) loops.
7402 //
7403 // for (int i = xstart-1; i >= 0; i--) { // Second loop
7404 // carry = 0;
7405 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7406 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7407 // (z[k] & LONG_MASK) + carry;
7408 // z[k] = (int)product;
7409 // carry = product >>> 32;
7410 // }
7411 // z[i] = (int)carry;
7412 // }
7413 //
7414 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7415
7416 const Register jdx = tmp1;
7417
7418 bind(L_second_loop);
7419 xorl(carry, carry); // carry = 0;
7420 movl(jdx, ylen); // j = ystart+1
7421
7422 subl(xstart, 1); // i = xstart-1;
7423 jcc(Assembler::negative, L_done);
7424
7425 push (z);
7426
7427 Label L_last_x;
7428 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7429 subl(xstart, 1); // i = xstart-1;
7430 jcc(Assembler::negative, L_last_x);
7431
7432 if (UseBMI2Instructions) {
7433 movq(rdx, Address(x, xstart, Address::times_4, 0));
7434 rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7435 } else {
7436 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7437 rorq(x_xstart, 32); // convert big-endian to little-endian
7438 }
7439
7440 Label L_third_loop_prologue;
7441 bind(L_third_loop_prologue);
7442
7443 push (x);
7444 push (xstart);
7445 push (ylen);
7446
7447
7448 if (UseBMI2Instructions) {
7449 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7450 } else { // !UseBMI2Instructions
7451 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7452 }
7453
7454 pop(ylen);
7455 pop(xlen);
7456 pop(x);
7457 pop(z);
7458
7459 movl(tmp3, xlen);
7460 addl(tmp3, 1);
7461 movl(Address(z, tmp3, Address::times_4, 0), carry);
7462 subl(tmp3, 1);
7463 jccb(Assembler::negative, L_done);
7464
7465 shrq(carry, 32);
7466 movl(Address(z, tmp3, Address::times_4, 0), carry);
7467 jmp(L_second_loop);
7468
7469 // Next infrequent code is moved outside loops.
7470 bind(L_last_x);
7471 if (UseBMI2Instructions) {
7472 movl(rdx, Address(x, 0));
7473 } else {
7474 movl(x_xstart, Address(x, 0));
7475 }
7476 jmp(L_third_loop_prologue);
7477
7478 bind(L_done);
7479
7480 pop(xlen);
7481
7482 pop(tmp5);
7483 pop(tmp4);
7484 pop(tmp3);
7485 pop(tmp2);
7486 pop(tmp1);
7487 pop(tmp0);
7488 }
7489
7490 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
7491 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
7492 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
7493 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
7494 Label VECTOR8_TAIL, VECTOR4_TAIL;
7495 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
7496 Label SAME_TILL_END, DONE;
7497 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
7498
7499 //scale is in rcx in both Win64 and Unix
7500 ShortBranchVerifier sbv(this);
7501
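  // Convert the element count into a byte count: shlq with no shift-count operand shifts
  // by cl, which holds log2_array_indxscale here.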
7502 shlq(length);
7503 xorq(result, result);
7504
7505 if ((AVX3Threshold == 0) && (UseAVX > 2) &&
7506 VM_Version::supports_avx512vlbw()) {
7507 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
7508
7509 cmpq(length, 64);
7510 jcc(Assembler::less, VECTOR32_TAIL);
7511
7512 movq(tmp1, length);
7513 andq(tmp1, 0x3F); // tail count
7514 andq(length, ~(0x3F)); //vector count
7515
7516 bind(VECTOR64_LOOP);
7517 // AVX512 code to compare 64 byte vectors.
7518 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
7519 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
7520 kortestql(k7, k7);
7521 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
7522 addq(result, 64);
7523 subq(length, 64);
7524 jccb(Assembler::notZero, VECTOR64_LOOP);
7525
7527 testq(tmp1, tmp1);
7528 jcc(Assembler::zero, SAME_TILL_END);
7529
7530 //bind(VECTOR64_TAIL);
7531 // AVX512 code to compare up to 63 byte vectors.
7532 mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
7533 shlxq(tmp2, tmp2, tmp1);
7534 notq(tmp2);
7535 kmovql(k3, tmp2);
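  // k3 now has its low 'tmp1' bits set, restricting the comparison to the tail bytes only.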
7536
7537 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
7538 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
7539
7540 ktestql(k7, k3);
7541 jcc(Assembler::below, SAME_TILL_END); // not mismatch
7542
7543 bind(VECTOR64_NOT_EQUAL);
7544 kmovql(tmp1, k7);
7545 notq(tmp1);
7546 tzcntq(tmp1, tmp1);
7547 addq(result, tmp1);
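  // Convert the byte offset of the mismatch back into an element index (shrq with no
  // shift-count operand shifts right by cl, the log2 element scale).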
7548 shrq(result);
7549 jmp(DONE);
7550 bind(VECTOR32_TAIL);
7551 }
7552
7553 cmpq(length, 8);
7554 jcc(Assembler::equal, VECTOR8_LOOP);
7555 jcc(Assembler::less, VECTOR4_TAIL);
7556
7557 if (UseAVX >= 2) {
7558 Label VECTOR16_TAIL, VECTOR32_LOOP;
7559
7560 cmpq(length, 16);
7561 jcc(Assembler::equal, VECTOR16_LOOP);
7562 jcc(Assembler::less, VECTOR8_LOOP);
7563
7564 cmpq(length, 32);
7565 jccb(Assembler::less, VECTOR16_TAIL);
7566
7567 subq(length, 32);
7568 bind(VECTOR32_LOOP);
7569 vmovdqu(rymm0, Address(obja, result));
7570 vmovdqu(rymm1, Address(objb, result));
7571 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
7572 vptest(rymm2, rymm2);
7573 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
7574 addq(result, 32);
7575 subq(length, 32);
7576 jcc(Assembler::greaterEqual, VECTOR32_LOOP);
7577 addq(length, 32);
7578 jcc(Assembler::equal, SAME_TILL_END);
7579 //falling through if less than 32 bytes left //close the branch here.
7580
7581 bind(VECTOR16_TAIL);
7582 cmpq(length, 16);
7583 jccb(Assembler::less, VECTOR8_TAIL);
7584 bind(VECTOR16_LOOP);
7585 movdqu(rymm0, Address(obja, result));
7586 movdqu(rymm1, Address(objb, result));
7587 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
7588 ptest(rymm2, rymm2);
7589 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7590 addq(result, 16);
7591 subq(length, 16);
7592 jcc(Assembler::equal, SAME_TILL_END);
7593 //falling through if less than 16 bytes left
7594 } else {//regular intrinsics
7595
7596 cmpq(length, 16);
7597 jccb(Assembler::less, VECTOR8_TAIL);
7598
7599 subq(length, 16);
7600 bind(VECTOR16_LOOP);
7601 movdqu(rymm0, Address(obja, result));
7602 movdqu(rymm1, Address(objb, result));
7603 pxor(rymm0, rymm1);
7604 ptest(rymm0, rymm0);
7605 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
7606 addq(result, 16);
7607 subq(length, 16);
7608 jccb(Assembler::greaterEqual, VECTOR16_LOOP);
7609 addq(length, 16);
7610 jcc(Assembler::equal, SAME_TILL_END);
7611 //falling through if less than 16 bytes left
7612 }
7613
7614 bind(VECTOR8_TAIL);
7615 cmpq(length, 8);
7616 jccb(Assembler::less, VECTOR4_TAIL);
7617 bind(VECTOR8_LOOP);
7618 movq(tmp1, Address(obja, result));
7619 movq(tmp2, Address(objb, result));
7620 xorq(tmp1, tmp2);
7621 testq(tmp1, tmp1);
7622 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
7623 addq(result, 8);
7624 subq(length, 8);
7625 jcc(Assembler::equal, SAME_TILL_END);
7626 //falling through if less than 8 bytes left
7627
7628 bind(VECTOR4_TAIL);
7629 cmpq(length, 4);
7630 jccb(Assembler::less, BYTES_TAIL);
7631 bind(VECTOR4_LOOP);
7632 movl(tmp1, Address(obja, result));
7633 xorl(tmp1, Address(objb, result));
7634 testl(tmp1, tmp1);
7635 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
7636 addq(result, 4);
7637 subq(length, 4);
7638 jcc(Assembler::equal, SAME_TILL_END);
7639 //falling through if less than 4 bytes left
7640
7641 bind(BYTES_TAIL);
7642 bind(BYTES_LOOP);
7643 load_unsigned_byte(tmp1, Address(obja, result));
7644 load_unsigned_byte(tmp2, Address(objb, result));
7645 xorl(tmp1, tmp2);
7646 testl(tmp1, tmp1);
7647 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7648 decq(length);
7649 jcc(Assembler::zero, SAME_TILL_END);
7650 incq(result);
7651 load_unsigned_byte(tmp1, Address(obja, result));
7652 load_unsigned_byte(tmp2, Address(objb, result));
7653 xorl(tmp1, tmp2);
7654 testl(tmp1, tmp1);
7655 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7656 decq(length);
7657 jcc(Assembler::zero, SAME_TILL_END);
7658 incq(result);
7659 load_unsigned_byte(tmp1, Address(obja, result));
7660 load_unsigned_byte(tmp2, Address(objb, result));
7661 xorl(tmp1, tmp2);
7662 testl(tmp1, tmp1);
7663 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
7664 jmp(SAME_TILL_END);
7665
7666 if (UseAVX >= 2) {
7667 bind(VECTOR32_NOT_EQUAL);
7668 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
7669 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
7670 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
7671 vpmovmskb(tmp1, rymm0);
7672 bsfq(tmp1, tmp1);
7673 addq(result, tmp1);
7674 shrq(result);
7675 jmp(DONE);
7676 }
7677
7678 bind(VECTOR16_NOT_EQUAL);
7679 if (UseAVX >= 2) {
7680 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
7681 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
7682 pxor(rymm0, rymm2);
7683 } else {
7684 pcmpeqb(rymm2, rymm2);
7685 pxor(rymm0, rymm1);
7686 pcmpeqb(rymm0, rymm1);
7687 pxor(rymm0, rymm2);
7688 }
7689 pmovmskb(tmp1, rymm0);
7690 bsfq(tmp1, tmp1);
7691 addq(result, tmp1);
7692 shrq(result);
7693 jmpb(DONE);
7694
7695 bind(VECTOR8_NOT_EQUAL);
7696 bind(VECTOR4_NOT_EQUAL);
7697 bsfq(tmp1, tmp1);
7698 shrq(tmp1, 3);
7699 addq(result, tmp1);
7700 bind(BYTES_NOT_EQUAL);
7701 shrq(result);
7702 jmpb(DONE);
7703
7704 bind(SAME_TILL_END);
7705 mov64(result, -1);
7706
7707 bind(DONE);
7708 }
7709
7710 //Helper functions for square_to_len()
7711
7712 /**
7713 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
7714 * Preserves x and z and modifies rest of the registers.
7715 */
7716 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7717 // Perform square and right shift by 1
7718 // Handle odd xlen case first, then for even xlen do the following
7719 // jlong carry = 0;
7720 // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
7721 // huge_128 product = x[j:j+1] * x[j:j+1];
7722 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
7723 // z[i+2:i+3] = (jlong)(product >>> 1);
7724 // carry = (jlong)product;
7725 // }
7726
7727 xorq(tmp5, tmp5); // carry
7728 xorq(rdxReg, rdxReg);
7729 xorl(tmp1, tmp1); // index for x
7730 xorl(tmp4, tmp4); // index for z
7731
7732 Label L_first_loop, L_first_loop_exit;
7733
7734 testl(xlen, 1);
7735 jccb(Assembler::zero, L_first_loop); //jump if xlen is even
7736
7737 // Square and right shift by 1 the odd element using 32 bit multiply
7738 movl(raxReg, Address(x, tmp1, Address::times_4, 0));
7739 imulq(raxReg, raxReg);
7740 shrq(raxReg, 1);
7741 adcq(tmp5, 0);
7742 movq(Address(z, tmp4, Address::times_4, 0), raxReg);
7743 incrementl(tmp1);
7744 addl(tmp4, 2);
7745
7746 // Square and right shift by 1 the rest using 64 bit multiply
7747 bind(L_first_loop);
7748 cmpptr(tmp1, xlen);
7749 jccb(Assembler::equal, L_first_loop_exit);
7750
7751 // Square
7752 movq(raxReg, Address(x, tmp1, Address::times_4, 0));
7753 rorq(raxReg, 32); // convert big-endian to little-endian
7754 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax
7755
7756 // Right shift by 1 and save carry
7757 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
7758 rcrq(rdxReg, 1);
7759 rcrq(raxReg, 1);
7760 adcq(tmp5, 0);
7761
7762 // Store result in z
7763 movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
7764 movq(Address(z, tmp4, Address::times_4, 8), raxReg);
7765
7766 // Update indices for x and z
7767 addl(tmp1, 2);
7768 addl(tmp4, 4);
7769 jmp(L_first_loop);
7770
7771 bind(L_first_loop_exit);
7772 }
7773
7774
7775 /**
7776 * Perform the following multiply add operation using BMI2 instructions
7777 * carry:sum = sum + op1*op2 + carry
7778 * op2 should be in rdx
7779 * op2 is preserved, all other registers are modified
7780 */
7781 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
7782 // assert op2 is rdx
7783 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
7784 addq(sum, carry);
7785 adcq(tmp2, 0);
7786 addq(sum, op1);
7787 adcq(tmp2, 0);
7788 movq(carry, tmp2);
7789 }
7790
7791 /**
7792 * Perform the following multiply add operation:
7793 * carry:sum = sum + op1*op2 + carry
7794 * Preserves op1, op2 and modifies rest of registers
7795 */
7796 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
7797 // rdx:rax = op1 * op2
7798 movq(raxReg, op2);
7799 mulq(op1);
7800
7801 // rdx:rax = sum + carry + rdx:rax
7802 addq(sum, carry);
7803 adcq(rdxReg, 0);
7804 addq(sum, raxReg);
7805 adcq(rdxReg, 0);
7806
7807 // carry:sum = rdx:sum
7808 movq(carry, rdxReg);
7809 }
7810
7811 /**
7812 * Add 64 bit long carry into z[] with carry propagation.
7813 * Preserves z and carry register values and modifies rest of registers.
7814 *
7815 */
7816 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
7817 Label L_fourth_loop, L_fourth_loop_exit;
7818
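  // Add 'carry' into the jlong at int index zlen-2 of z[], then ripple any resulting
  // carry toward lower indices (more significant words), one jlong at a time.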
7819 movl(tmp1, 1);
7820 subl(zlen, 2);
7821 addq(Address(z, zlen, Address::times_4, 0), carry);
7822
7823 bind(L_fourth_loop);
7824 jccb(Assembler::carryClear, L_fourth_loop_exit);
7825 subl(zlen, 2);
7826 jccb(Assembler::negative, L_fourth_loop_exit);
7827 addq(Address(z, zlen, Address::times_4, 0), tmp1);
7828 jmp(L_fourth_loop);
7829 bind(L_fourth_loop_exit);
7830 }
7831
7832 /**
7833 * Shift z[] left by 1 bit.
7834 * Preserves x, len, z and zlen registers and modifies rest of the registers.
7835 *
7836 */
7837 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
7838
7839 Label L_fifth_loop, L_fifth_loop_exit;
7840
7841 // Fifth loop
7842 // Perform primitiveLeftShift(z, zlen, 1)
7843
7844 const Register prev_carry = tmp1;
7845 const Register new_carry = tmp4;
7846 const Register value = tmp2;
7847 const Register zidx = tmp3;
7848
7849 // int zidx, carry;
7850 // long value;
7851 // carry = 0;
7852 // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
  //    (carry:value)  = (z[zidx] << 1) | carry ;
  //    z[zidx] = value;
7855 // }
7856
7857 movl(zidx, zlen);
7858 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
7859
7860 bind(L_fifth_loop);
7861 decl(zidx); // Use decl to preserve carry flag
7862 decl(zidx);
7863 jccb(Assembler::negative, L_fifth_loop_exit);
7864
7865 if (UseBMI2Instructions) {
7866 movq(value, Address(z, zidx, Address::times_4, 0));
7867 rclq(value, 1);
7868 rorxq(value, value, 32);
7869 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
7870 }
7871 else {
7872 // clear new_carry
7873 xorl(new_carry, new_carry);
7874
7875 // Shift z[i] by 1, or in previous carry and save new carry
7876 movq(value, Address(z, zidx, Address::times_4, 0));
7877 shlq(value, 1);
7878 adcl(new_carry, 0);
7879
7880 orq(value, prev_carry);
7881 rorq(value, 0x20);
7882 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
7883
7884 // Set previous carry = new carry
7885 movl(prev_carry, new_carry);
7886 }
7887 jmp(L_fifth_loop);
7888
7889 bind(L_fifth_loop_exit);
7890 }
7891
7892
7893 /**
7894 * Code for BigInteger::squareToLen() intrinsic
7895 *
7896 * rdi: x
7897 * rsi: len
7898 * r8: z
7899 * rcx: zlen
7900 * r12: tmp1
7901 * r13: tmp2
7902 * r14: tmp3
7903 * r15: tmp4
7904 * rbx: tmp5
7905 *
7906 */
7907 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7908
7909 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
7910 push(tmp1);
7911 push(tmp2);
7912 push(tmp3);
7913 push(tmp4);
7914 push(tmp5);
7915
7916 // First loop
7917 // Store the squares, right shifted one bit (i.e., divided by 2).
7918 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
7919
7920 // Add in off-diagonal sums.
7921 //
7922 // Second, third (nested) and fourth loops.
7923 // zlen +=2;
7924 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
7925 // carry = 0;
7926 // long op2 = x[xidx:xidx+1];
7927 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
7928 // k -= 2;
7929 // long op1 = x[j:j+1];
7930 // long sum = z[k:k+1];
7931 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
7932 // z[k:k+1] = sum;
7933 // }
7934 // add_one_64(z, k, carry, tmp_regs);
7935 // }
7936
7937 const Register carry = tmp5;
7938 const Register sum = tmp3;
7939 const Register op1 = tmp4;
7940 Register op2 = tmp2;
7941
7942 push(zlen);
7943 push(len);
7944 addl(zlen,2);
7945 bind(L_second_loop);
7946 xorq(carry, carry);
7947 subl(zlen, 4);
7948 subl(len, 2);
7949 push(zlen);
7950 push(len);
7951 cmpl(len, 0);
7952 jccb(Assembler::lessEqual, L_second_loop_exit);
7953
7954 // Multiply an array by one 64 bit long.
7955 if (UseBMI2Instructions) {
7956 op2 = rdxReg;
7957 movq(op2, Address(x, len, Address::times_4, 0));
7958 rorxq(op2, op2, 32);
7959 }
7960 else {
7961 movq(op2, Address(x, len, Address::times_4, 0));
7962 rorq(op2, 32);
7963 }
7964
7965 bind(L_third_loop);
7966 decrementl(len);
7967 jccb(Assembler::negative, L_third_loop_exit);
7968 decrementl(len);
7969 jccb(Assembler::negative, L_last_x);
7970
7971 movq(op1, Address(x, len, Address::times_4, 0));
7972 rorq(op1, 32);
7973
7974 bind(L_multiply);
7975 subl(zlen, 2);
7976 movq(sum, Address(z, zlen, Address::times_4, 0));
7977
7978 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
7979 if (UseBMI2Instructions) {
7980 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
7981 }
7982 else {
7983 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
7984 }
7985
7986 movq(Address(z, zlen, Address::times_4, 0), sum);
7987
7988 jmp(L_third_loop);
7989 bind(L_third_loop_exit);
7990
7991 // Fourth loop
7992 // Add 64 bit long carry into z with carry propagation.
  // Uses the offset-adjusted zlen.
7994 add_one_64(z, zlen, carry, tmp1);
7995
7996 pop(len);
7997 pop(zlen);
7998 jmp(L_second_loop);
7999
8000 // Next infrequent code is moved outside loops.
8001 bind(L_last_x);
8002 movl(op1, Address(x, 0));
8003 jmp(L_multiply);
8004
8005 bind(L_second_loop_exit);
8006 pop(len);
8007 pop(zlen);
8008 pop(len);
8009 pop(zlen);
8010
8011 // Fifth loop
8012 // Shift z left 1 bit.
8013 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
8014
8015 // z[zlen-1] |= x[len-1] & 1;
8016 movl(tmp3, Address(x, len, Address::times_4, -4));
8017 andl(tmp3, 1);
8018 orl(Address(z, zlen, Address::times_4, -4), tmp3);
8019
8020 pop(tmp5);
8021 pop(tmp4);
8022 pop(tmp3);
8023 pop(tmp2);
8024 pop(tmp1);
8025 }
8026
8027 /**
8028 * Helper function for mul_add()
8029 * Multiply the in[] by int k and add to out[] starting at offset offs using
8030 * 128 bit by 32 bit multiply and return the carry in tmp5.
 * Only the quad-int-aligned portion of in[] is processed by this function.
 * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
 * This function preserves the out, in and k registers.
 * len and offset point to the appropriate index in "in" and "out" respectively.
 * tmp5 holds the carry.
 * The other registers are temporary and are modified.
8037 *
8038 */
8039 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
8040 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
8041 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8042
8043 Label L_first_loop, L_first_loop_exit;
8044
8045 movl(tmp1, len);
8046 shrl(tmp1, 2);
8047
8048 bind(L_first_loop);
8049 subl(tmp1, 1);
8050 jccb(Assembler::negative, L_first_loop_exit);
8051
8052 subl(len, 4);
8053 subl(offset, 4);
8054
8055 Register op2 = tmp2;
8056 const Register sum = tmp3;
8057 const Register op1 = tmp4;
8058 const Register carry = tmp5;
8059
8060 if (UseBMI2Instructions) {
8061 op2 = rdxReg;
8062 }
8063
8064 movq(op1, Address(in, len, Address::times_4, 8));
8065 rorq(op1, 32);
8066 movq(sum, Address(out, offset, Address::times_4, 8));
8067 rorq(sum, 32);
8068 if (UseBMI2Instructions) {
8069 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8070 }
8071 else {
8072 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8073 }
8074 // Store back in big endian from little endian
8075 rorq(sum, 0x20);
8076 movq(Address(out, offset, Address::times_4, 8), sum);
8077
8078 movq(op1, Address(in, len, Address::times_4, 0));
8079 rorq(op1, 32);
8080 movq(sum, Address(out, offset, Address::times_4, 0));
8081 rorq(sum, 32);
8082 if (UseBMI2Instructions) {
8083 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8084 }
8085 else {
8086 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8087 }
8088 // Store back in big endian from little endian
8089 rorq(sum, 0x20);
8090 movq(Address(out, offset, Address::times_4, 0), sum);
8091
8092 jmp(L_first_loop);
8093 bind(L_first_loop_exit);
8094 }
8095
8096 /**
8097 * Code for BigInteger::mulAdd() intrinsic
8098 *
8099 * rdi: out
8100 * rsi: in
8101 * r11: offs (out.length - offset)
8102 * rcx: len
8103 * r8: k
8104 * r12: tmp1
8105 * r13: tmp2
8106 * r14: tmp3
8107 * r15: tmp4
8108 * rbx: tmp5
8109 * Multiply the in[] by word k and add to out[], return the carry in rax
8110 */
8111 void MacroAssembler::mul_add(Register out, Register in, Register offs,
8112 Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
8113 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8114
8115 Label L_carry, L_last_in, L_done;
8116
8117 // carry = 0;
8118 // for (int j=len-1; j >= 0; j--) {
8119 // long product = (in[j] & LONG_MASK) * kLong +
8120 // (out[offs] & LONG_MASK) + carry;
8121 // out[offs--] = (int)product;
8122 // carry = product >>> 32;
8123 // }
8124 //
8125 push(tmp1);
8126 push(tmp2);
8127 push(tmp3);
8128 push(tmp4);
8129 push(tmp5);
8130
8131 Register op2 = tmp2;
8132 const Register sum = tmp3;
8133 const Register op1 = tmp4;
8134 const Register carry = tmp5;
8135
8136 if (UseBMI2Instructions) {
8137 op2 = rdxReg;
8138 movl(op2, k);
8139 }
8140 else {
8141 movl(op2, k);
8142 }
8143
8144 xorq(carry, carry);
8145
// First loop

// Multiply in[] by k in a 4-way unrolled loop using a 128-bit by 32-bit multiply.
// The carry is in tmp5.
mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);

// Multiply the trailing in[] entries using a 64-bit by 32-bit multiply, if any
8153 decrementl(len);
8154 jccb(Assembler::negative, L_carry);
8155 decrementl(len);
8156 jccb(Assembler::negative, L_last_in);
8157
8158 movq(op1, Address(in, len, Address::times_4, 0));
8159 rorq(op1, 32);
8160
8161 subl(offs, 2);
8162 movq(sum, Address(out, offs, Address::times_4, 0));
8163 rorq(sum, 32);
8164
8165 if (UseBMI2Instructions) {
8166 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8167 }
8168 else {
8169 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8170 }
8171
8172 // Store back in big endian from little endian
8173 rorq(sum, 0x20);
8174 movq(Address(out, offs, Address::times_4, 0), sum);
8175
8176 testl(len, len);
8177 jccb(Assembler::zero, L_carry);
8178
8179 //Multiply the last in[] entry, if any
8180 bind(L_last_in);
8181 movl(op1, Address(in, 0));
8182 movl(sum, Address(out, offs, Address::times_4, -4));
8183
8184 movl(raxReg, k);
8185 mull(op1); //tmp4 * eax -> edx:eax
8186 addl(sum, carry);
8187 adcl(rdxReg, 0);
8188 addl(sum, raxReg);
8189 adcl(rdxReg, 0);
8190 movl(carry, rdxReg);
8191
8192 movl(Address(out, offs, Address::times_4, -4), sum);
8193
8194 bind(L_carry);
8195 //return tmp5/carry as carry in rax
8196 movl(rax, carry);
8197
8198 bind(L_done);
8199 pop(tmp5);
8200 pop(tmp4);
8201 pop(tmp3);
8202 pop(tmp2);
8203 pop(tmp1);
8204 }
8205
8206 /**
8207 * Emits code to update CRC-32 with a byte value according to constants in table
8208 *
* @param [in,out] crc   Register containing the crc.
* @param [in]     val   Register containing the byte to fold into the CRC.
* @param [in]     table Register containing the table of crc constants.
8212 *
8213 * uint32_t crc;
8214 * val = crc_table[(val ^ crc) & 0xFF];
8215 * crc = val ^ (crc >> 8);
8216 *
8217 */
8218 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
8219 xorl(val, crc);
8220 andl(val, 0xFF);
8221 shrl(crc, 8); // unsigned shift
8222 xorl(crc, Address(table, val, Address::times_4, 0));
8223 }
8224
8225 /**
8226 * Fold 128-bit data chunk
8227 */
8228 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
8229 if (UseAVX > 0) {
8230 vpclmulhdq(xtmp, xK, xcrc); // [123:64]
8231 vpclmulldq(xcrc, xK, xcrc); // [63:0]
8232 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
8233 pxor(xcrc, xtmp);
8234 } else {
8235 movdqa(xtmp, xcrc);
8236 pclmulhdq(xtmp, xK); // [123:64]
8237 pclmulldq(xcrc, xK); // [63:0]
8238 pxor(xcrc, xtmp);
8239 movdqu(xtmp, Address(buf, offset));
8240 pxor(xcrc, xtmp);
8241 }
8242 }
8243
8244 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
8245 if (UseAVX > 0) {
8246 vpclmulhdq(xtmp, xK, xcrc);
8247 vpclmulldq(xcrc, xK, xcrc);
8248 pxor(xcrc, xbuf);
8249 pxor(xcrc, xtmp);
8250 } else {
8251 movdqa(xtmp, xcrc);
8252 pclmulhdq(xtmp, xK);
8253 pclmulldq(xcrc, xK);
8254 pxor(xcrc, xbuf);
8255 pxor(xcrc, xtmp);
8256 }
8257 }
8258
8259 /**
8260 * 8-bit folds to compute 32-bit CRC
8261 *
* uint64_t xcrc;
* xcrc = timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
8264 */
8265 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
8266 movdl(tmp, xcrc);
8267 andl(tmp, 0xFF);
8268 movdl(xtmp, Address(table, tmp, Address::times_4, 0));
8269 psrldq(xcrc, 1); // unsigned shift one byte
8270 pxor(xcrc, xtmp);
8271 }
8272
8273 /**
* uint32_t crc;
* crc = timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
8276 */
8277 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
8278 movl(tmp, crc);
8279 andl(tmp, 0xFF);
8280 shrl(crc, 8);
8281 xorl(crc, Address(table, tmp, Address::times_4, 0));
8282 }
8283
8284 /**
8285 * @param crc register containing existing CRC (32-bit)
8286 * @param buf register pointing to input byte buffer (byte*)
8287 * @param len register containing number of bytes
8288 * @param table register that will contain address of CRC table
8289 * @param tmp scratch register
8290 */
8291 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
8292 assert_different_registers(crc, buf, len, table, tmp, rax);
8293
8294 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8295 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8296
8297 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8298 // context for the registers used, where all instructions below are using 128-bit mode
8299 // On EVEX without VL and BW, these instructions will all be AVX.
8300 lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
8301 notl(crc); // ~crc
8302 cmpl(len, 16);
8303 jcc(Assembler::less, L_tail);
8304
8305 // Align buffer to 16 bytes
8306 movl(tmp, buf);
8307 andl(tmp, 0xF);
8308 jccb(Assembler::zero, L_aligned);
8309 subl(tmp, 16);
8310 addl(len, tmp);
8311
8312 align(4);
8313 BIND(L_align_loop);
8314 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8315 update_byte_crc32(crc, rax, table);
8316 increment(buf);
8317 incrementl(tmp);
8318 jccb(Assembler::less, L_align_loop);
8319
8320 BIND(L_aligned);
8321 movl(tmp, len); // save
8322 shrl(len, 4);
8323 jcc(Assembler::zero, L_tail_restore);
8324
8325 // Fold crc into first bytes of vector
8326 movdqa(xmm1, Address(buf, 0));
8327 movdl(rax, xmm1);
8328 xorl(crc, rax);
8329 if (VM_Version::supports_sse4_1()) {
8330 pinsrd(xmm1, crc, 0);
8331 } else {
8332 pinsrw(xmm1, crc, 0);
8333 shrl(crc, 16);
8334 pinsrw(xmm1, crc, 1);
8335 }
8336 addptr(buf, 16);
8337 subl(len, 4); // len > 0
8338 jcc(Assembler::less, L_fold_tail);
8339
8340 movdqa(xmm2, Address(buf, 0));
8341 movdqa(xmm3, Address(buf, 16));
8342 movdqa(xmm4, Address(buf, 32));
8343 addptr(buf, 48);
8344 subl(len, 3);
8345 jcc(Assembler::lessEqual, L_fold_512b);
8346
8347 // Fold total 512 bits of polynomial on each iteration,
8348 // 128 bits per each of 4 parallel streams.
8349 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
8350
8351 align32();
8352 BIND(L_fold_512b_loop);
8353 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8354 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
8355 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
8356 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
8357 addptr(buf, 64);
8358 subl(len, 4);
8359 jcc(Assembler::greater, L_fold_512b_loop);
8360
8361 // Fold 512 bits to 128 bits.
8362 BIND(L_fold_512b);
8363 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8364 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
8365 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
8366 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
8367
8368 // Fold the rest of 128 bits data chunks
8369 BIND(L_fold_tail);
8370 addl(len, 3);
8371 jccb(Assembler::lessEqual, L_fold_128b);
8372 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
8373
8374 BIND(L_fold_tail_loop);
8375 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8376 addptr(buf, 16);
8377 decrementl(len);
8378 jccb(Assembler::greater, L_fold_tail_loop);
8379
8380 // Fold 128 bits in xmm1 down into 32 bits in crc register.
8381 BIND(L_fold_128b);
8382 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
8383 if (UseAVX > 0) {
8384 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8385 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
8386 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8387 } else {
8388 movdqa(xmm2, xmm0);
8389 pclmulqdq(xmm2, xmm1, 0x1);
8390 movdqa(xmm3, xmm0);
8391 pand(xmm3, xmm2);
8392 pclmulqdq(xmm0, xmm3, 0x1);
8393 }
8394 psrldq(xmm1, 8);
8395 psrldq(xmm2, 4);
8396 pxor(xmm0, xmm1);
8397 pxor(xmm0, xmm2);
8398
8399 // 8 8-bit folds to compute 32-bit CRC.
8400 for (int j = 0; j < 4; j++) {
8401 fold_8bit_crc32(xmm0, table, xmm1, rax);
8402 }
8403 movdl(crc, xmm0); // mov 32 bits to general register
8404 for (int j = 0; j < 4; j++) {
8405 fold_8bit_crc32(crc, table, rax);
8406 }
8407
8408 BIND(L_tail_restore);
8409 movl(len, tmp); // restore
8410 BIND(L_tail);
8411 andl(len, 0xf);
8412 jccb(Assembler::zero, L_exit);
8413
8414 // Fold the rest of bytes
8415 align(4);
8416 BIND(L_tail_loop);
8417 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8418 update_byte_crc32(crc, rax, table);
8419 increment(buf);
8420 decrementl(len);
8421 jccb(Assembler::greater, L_tail_loop);
8422
8423 BIND(L_exit);
8424 notl(crc); // ~c
8425 }
8426
8427 // Helper function for AVX 512 CRC32
8428 // Fold 512-bit data chunks
8429 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
8430 Register pos, int offset) {
8431 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
8432 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
8433 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
8434 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
8435 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
8436 }
8437
8438 // Helper function for AVX 512 CRC32
8439 // Compute CRC32 for < 256B buffers
8440 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
8441 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
8442 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
8443
8444 Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
8445 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
8446 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
8447
8448 // check if there is enough buffer to be able to fold 16B at a time
8449 cmpl(len, 32);
8450 jcc(Assembler::less, L_less_than_32);
8451
8452 // if there is, load the constants
8453 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10
8454 movdl(xmm0, crc); // get the initial crc value
8455 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8456 pxor(xmm7, xmm0);
8457
8458 // update the buffer pointer
8459 addl(pos, 16);
// update the counter; subtract 32 instead of 16 to save one instruction in the loop
8461 subl(len, 32);
8462 jmp(L_16B_reduction_loop);
8463
8464 bind(L_less_than_32);
// move the initial crc to the return value; this is necessary for zero-length buffers.
8466 movl(rax, crc);
8467 testl(len, len);
8468 jcc(Assembler::equal, L_cleanup);
8469
8470 movdl(xmm0, crc); //get the initial crc value
8471
8472 cmpl(len, 16);
8473 jcc(Assembler::equal, L_exact_16_left);
8474 jcc(Assembler::less, L_less_than_16_left);
8475
8476 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8477 pxor(xmm7, xmm0); //xor the initial crc value
8478 addl(pos, 16);
8479 subl(len, 16);
8480 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
8481 jmp(L_get_last_two_xmms);
8482
8483 bind(L_less_than_16_left);
// use stack space to load data of less than 16 bytes; zero out the 16B in memory first.
8485 pxor(xmm1, xmm1);
8486 movptr(tmp1, rsp);
8487 movdqu(Address(tmp1, 0 * 16), xmm1);
8488
8489 cmpl(len, 4);
8490 jcc(Assembler::less, L_only_less_than_4);
8491
8492 //backup the counter value
8493 movl(tmp2, len);
8494 cmpl(len, 8);
8495 jcc(Assembler::less, L_less_than_8_left);
8496
8497 //load 8 Bytes
8498 movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
8499 movq(Address(tmp1, 0 * 16), rax);
8500 addptr(tmp1, 8);
8501 subl(len, 8);
8502 addl(pos, 8);
8503
8504 bind(L_less_than_8_left);
8505 cmpl(len, 4);
8506 jcc(Assembler::less, L_less_than_4_left);
8507
8508 //load 4 Bytes
8509 movl(rax, Address(buf, pos, Address::times_1, 0));
8510 movl(Address(tmp1, 0 * 16), rax);
8511 addptr(tmp1, 4);
8512 subl(len, 4);
8513 addl(pos, 4);
8514
8515 bind(L_less_than_4_left);
8516 cmpl(len, 2);
8517 jcc(Assembler::less, L_less_than_2_left);
8518
8519 // load 2 Bytes
8520 movw(rax, Address(buf, pos, Address::times_1, 0));
8521 movl(Address(tmp1, 0 * 16), rax);
8522 addptr(tmp1, 2);
8523 subl(len, 2);
8524 addl(pos, 2);
8525
8526 bind(L_less_than_2_left);
8527 cmpl(len, 1);
8528 jcc(Assembler::less, L_zero_left);
8529
8530 // load 1 Byte
8531 movb(rax, Address(buf, pos, Address::times_1, 0));
8532 movb(Address(tmp1, 0 * 16), rax);
8533
8534 bind(L_zero_left);
8535 movdqu(xmm7, Address(rsp, 0));
8536 pxor(xmm7, xmm0); //xor the initial crc value
8537
8538 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8539 movdqu(xmm0, Address(rax, tmp2));
8540 pshufb(xmm7, xmm0);
8541 jmp(L_128_done);
8542
8543 bind(L_exact_16_left);
8544 movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
8545 pxor(xmm7, xmm0); //xor the initial crc value
8546 jmp(L_128_done);
8547
8548 bind(L_only_less_than_4);
8549 cmpl(len, 3);
8550 jcc(Assembler::less, L_only_less_than_3);
8551
8552 // load 3 Bytes
8553 movb(rax, Address(buf, pos, Address::times_1, 0));
8554 movb(Address(tmp1, 0), rax);
8555
8556 movb(rax, Address(buf, pos, Address::times_1, 1));
8557 movb(Address(tmp1, 1), rax);
8558
8559 movb(rax, Address(buf, pos, Address::times_1, 2));
8560 movb(Address(tmp1, 2), rax);
8561
8562 movdqu(xmm7, Address(rsp, 0));
8563 pxor(xmm7, xmm0); //xor the initial crc value
8564
8565 pslldq(xmm7, 0x5);
8566 jmp(L_barrett);
8567 bind(L_only_less_than_3);
8568 cmpl(len, 2);
8569 jcc(Assembler::less, L_only_less_than_2);
8570
8571 // load 2 Bytes
8572 movb(rax, Address(buf, pos, Address::times_1, 0));
8573 movb(Address(tmp1, 0), rax);
8574
8575 movb(rax, Address(buf, pos, Address::times_1, 1));
8576 movb(Address(tmp1, 1), rax);
8577
8578 movdqu(xmm7, Address(rsp, 0));
8579 pxor(xmm7, xmm0); //xor the initial crc value
8580
8581 pslldq(xmm7, 0x6);
8582 jmp(L_barrett);
8583
8584 bind(L_only_less_than_2);
8585 //load 1 Byte
8586 movb(rax, Address(buf, pos, Address::times_1, 0));
8587 movb(Address(tmp1, 0), rax);
8588
8589 movdqu(xmm7, Address(rsp, 0));
8590 pxor(xmm7, xmm0); //xor the initial crc value
8591
8592 pslldq(xmm7, 0x7);
8593 }
8594
8595 /**
8596 * Compute CRC32 using AVX512 instructions
* @param crc   register containing existing CRC (32-bit)
* @param buf   register pointing to input byte buffer (byte*)
* @param len   register containing number of bytes
* @param table address of crc or crc32c table
* @param tmp1  scratch register
* @param tmp2  scratch register
* @return rax  result register
8604 *
8605 * This routine is identical for crc32c with the exception of the precomputed constant
8606 * table which will be passed as the table argument. The calculation steps are
8607 * the same for both variants.
8608 */
8609 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
8610 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
8611
8612 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8613 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8614 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
8615 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
8616 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
8617
8618 const Register pos = r12;
8619 push(r12);
8620 subptr(rsp, 16 * 2 + 8);
8621
8622 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
8623 // context for the registers used, where all instructions below are using 128-bit mode
8624 // On EVEX without VL and BW, these instructions will all be AVX.
8625 movl(pos, 0);
8626
8627 // check if smaller than 256B
8628 cmpl(len, 256);
8629 jcc(Assembler::less, L_less_than_256);
8630
8631 // load the initial crc value
8632 movdl(xmm10, crc);
8633
8634 // receive the initial 64B data, xor the initial crc value
8635 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
8636 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
8637 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
8638 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
8639
8640 subl(len, 256);
8641 cmpl(len, 256);
8642 jcc(Assembler::less, L_fold_128_B_loop);
8643
8644 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
8645 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
8646 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
8647 subl(len, 256);
8648
8649 bind(L_fold_256_B_loop);
8650 addl(pos, 256);
8651 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
8652 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
8653 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
8654 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
8655
8656 subl(len, 256);
8657 jcc(Assembler::greaterEqual, L_fold_256_B_loop);
8658
8659 // Fold 256 into 128
8660 addl(pos, 256);
8661 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
8662 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
8663 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
8664
8665 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
8666 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
8667 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
8668
8669 evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
8670 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
8671
8672 addl(len, 128);
8673 jmp(L_fold_128_B_register);
8674
// At this point in the code there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
// will fold 128B at a time until we have 128 + y bytes of buffer left.

// fold 128B at a time. This section of the code folds 8 xmm registers in parallel
8679 bind(L_fold_128_B_loop);
8680 addl(pos, 128);
8681 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
8682 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
8683
8684 subl(len, 128);
8685 jcc(Assembler::greaterEqual, L_fold_128_B_loop);
8686
8687 addl(pos, 128);
8688
8689 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
8690 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
8691 bind(L_fold_128_B_register);
8692 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
8693 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
8694 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
8695 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
8696 // save last that has no multiplicand
8697 vextracti64x2(xmm7, xmm4, 3);
8698
8699 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
8700 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
8701 // Needed later in reduction loop
8702 movdqu(xmm10, Address(table, 1 * 16));
8703 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
8704 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
8705
8706 // Swap 1,0,3,2 - 01 00 11 10
8707 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
8708 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
8709 vextracti128(xmm5, xmm8, 1);
8710 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
8711
8712 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
8713 // instead of a cmp instruction, we use the negative flag with the jl instruction
8714 addl(len, 128 - 16);
8715 jcc(Assembler::less, L_final_reduction_for_128);
8716
8717 bind(L_16B_reduction_loop);
8718 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8719 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8720 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8721 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
8722 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8723 addl(pos, 16);
8724 subl(len, 16);
8725 jcc(Assembler::greaterEqual, L_16B_reduction_loop);
8726
8727 bind(L_final_reduction_for_128);
8728 addl(len, 16);
8729 jcc(Assembler::equal, L_128_done);
8730
8731 bind(L_get_last_two_xmms);
8732 movdqu(xmm2, xmm7);
8733 addl(pos, len);
8734 movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
8735 subl(pos, len);
8736
8737 // get rid of the extra data that was loaded before
8738 // load the shift constant
8739 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8740 movdqu(xmm0, Address(rax, len));
8741 addl(rax, len);
8742
8743 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8744 //Change mask to 512
8745 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
8746 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
8747
8748 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
8749 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8750 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8751 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8752 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
8753
8754 bind(L_128_done);
8755 // compute crc of a 128-bit value
8756 movdqu(xmm10, Address(table, 3 * 16));
8757 movdqu(xmm0, xmm7);
8758
8759 // 64b fold
8760 vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
8761 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
8762 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8763
8764 // 32b fold
8765 movdqu(xmm0, xmm7);
8766 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
8767 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8768 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8769 jmp(L_barrett);
8770
8771 bind(L_less_than_256);
8772 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
8773
8774 //barrett reduction
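// Note on the step below (a summary of the standard technique, not new semantics):
// Barrett reduction computes R(x) mod P(x) for the remaining folded value R using
// two carry-less multiplies with precomputed constants (roughly
// mu = floor(x^64 / P(x)) and P(x) itself, loaded from the constant table),
// avoiding any polynomial division when producing the final 32-bit CRC.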
8775 bind(L_barrett);
8776 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
8777 movdqu(xmm1, xmm7);
8778 movdqu(xmm2, xmm7);
8779 movdqu(xmm10, Address(table, 4 * 16));
8780
8781 pclmulqdq(xmm7, xmm10, 0x0);
8782 pxor(xmm7, xmm2);
8783 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
8784 movdqu(xmm2, xmm7);
8785 pclmulqdq(xmm7, xmm10, 0x10);
8786 pxor(xmm7, xmm2);
8787 pxor(xmm7, xmm1);
8788 pextrd(crc, xmm7, 2);
8789
8790 bind(L_cleanup);
8791 addptr(rsp, 16 * 2 + 8);
8792 pop(r12);
8793 }
8794
8795 // S. Gueron / Information Processing Letters 112 (2012) 184
8796 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
8797 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
8798 // Output: the 64-bit carry-less product of B * CONST
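// Scalar sketch of the table-driven recombination performed below (a C-style
// rendering assembled from the Q1..Q4 comments in the body; TABLEExt[n] is the
// 256-entry table selected by chunk index n):
//
//   uint64_t Q1 = TABLEExt[n][ B        & 0xFF];
//   uint64_t Q2 = TABLEExt[n][(B >> 8)  & 0xFF];
//   uint64_t Q3 = TABLEExt[n][(B >> 16) & 0xFF];
//   uint64_t Q4 = TABLEExt[n][(B >> 24) & 0xFF];
//   return Q1 ^ (Q2 << 8) ^ (Q3 << 16) ^ (Q4 << 24);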
8799 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
8800 Register tmp1, Register tmp2, Register tmp3) {
8801 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
8802 if (n > 0) {
8803 addq(tmp3, n * 256 * 8);
8804 }
8805 // Q1 = TABLEExt[n][B & 0xFF];
8806 movl(tmp1, in);
8807 andl(tmp1, 0x000000FF);
8808 shll(tmp1, 3);
8809 addq(tmp1, tmp3);
8810 movq(tmp1, Address(tmp1, 0));
8811
8812 // Q2 = TABLEExt[n][B >> 8 & 0xFF];
8813 movl(tmp2, in);
8814 shrl(tmp2, 8);
8815 andl(tmp2, 0x000000FF);
8816 shll(tmp2, 3);
8817 addq(tmp2, tmp3);
8818 movq(tmp2, Address(tmp2, 0));
8819
8820 shlq(tmp2, 8);
8821 xorq(tmp1, tmp2);
8822
8823 // Q3 = TABLEExt[n][B >> 16 & 0xFF];
8824 movl(tmp2, in);
8825 shrl(tmp2, 16);
8826 andl(tmp2, 0x000000FF);
8827 shll(tmp2, 3);
8828 addq(tmp2, tmp3);
8829 movq(tmp2, Address(tmp2, 0));
8830
8831 shlq(tmp2, 16);
8832 xorq(tmp1, tmp2);
8833
8834 // Q4 = TABLEExt[n][B >> 24 & 0xFF];
8835 shrl(in, 24);
8836 andl(in, 0x000000FF);
8837 shll(in, 3);
8838 addq(in, tmp3);
8839 movq(in, Address(in, 0));
8840
8841 shlq(in, 24);
8842 xorq(in, tmp1);
8843 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
8844 }
8845
8846 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
8847 Register in_out,
8848 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
8849 XMMRegister w_xtmp2,
8850 Register tmp1,
8851 Register n_tmp2, Register n_tmp3) {
8852 if (is_pclmulqdq_supported) {
8853 movdl(w_xtmp1, in_out); // modified blindly
8854
8855 movl(tmp1, const_or_pre_comp_const_index);
8856 movdl(w_xtmp2, tmp1);
8857 pclmulqdq(w_xtmp1, w_xtmp2, 0);
8858
8859 movdq(in_out, w_xtmp1);
8860 } else {
8861 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
8862 }
8863 }
8864
8865 // Recombination Alternative 2: No bit-reflections
8866 // T1 = (CRC_A * U1) << 1
8867 // T2 = (CRC_B * U2) << 1
8868 // C1 = T1 >> 32
8869 // C2 = T2 >> 32
8870 // T1 = T1 & 0xFFFFFFFF
8871 // T2 = T2 & 0xFFFFFFFF
8872 // T1 = CRC32(0, T1)
8873 // T2 = CRC32(0, T2)
8874 // C1 = C1 ^ T1
8875 // C2 = C2 ^ T2
8876 // CRC = C1 ^ C2 ^ CRC_C
8877 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
8878 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8879 Register tmp1, Register tmp2,
8880 Register n_tmp3) {
8881 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
8882 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
8883 shlq(in_out, 1);
8884 movl(tmp1, in_out);
8885 shrq(in_out, 32);
8886 xorl(tmp2, tmp2);
8887 crc32(tmp2, tmp1, 4);
8888 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
8889 shlq(in1, 1);
8890 movl(tmp1, in1);
8891 shrq(in1, 32);
8892 xorl(tmp2, tmp2);
8893 crc32(tmp2, tmp1, 4);
8894 xorl(in1, tmp2);
8895 xorl(in_out, in1);
8896 xorl(in_out, in2);
8897 }
8898
8899 // Set N to predefined value
8900 // Subtract from a length of a buffer
8901 // execute in a loop:
8902 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
8903 // for i = 1 to N do
8904 // CRC_A = CRC32(CRC_A, A[i])
8905 // CRC_B = CRC32(CRC_B, B[i])
8906 // CRC_C = CRC32(CRC_C, C[i])
8907 // end for
8908 // Recombine
8909 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
8910 Register in_out1, Register in_out2, Register in_out3,
8911 Register tmp1, Register tmp2, Register tmp3,
8912 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8913 Register tmp4, Register tmp5,
8914 Register n_tmp6) {
8915 Label L_processPartitions;
8916 Label L_processPartition;
8917 Label L_exit;
8918
8919 bind(L_processPartitions);
8920 cmpl(in_out1, 3 * size);
8921 jcc(Assembler::less, L_exit);
8922 xorl(tmp1, tmp1);
8923 xorl(tmp2, tmp2);
8924 movq(tmp3, in_out2);
8925 addq(tmp3, size);
8926
8927 bind(L_processPartition);
8928 crc32(in_out3, Address(in_out2, 0), 8);
8929 crc32(tmp1, Address(in_out2, size), 8);
8930 crc32(tmp2, Address(in_out2, size * 2), 8);
8931 addq(in_out2, 8);
8932 cmpq(in_out2, tmp3);
8933 jcc(Assembler::less, L_processPartition);
8934 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
8935 w_xtmp1, w_xtmp2, w_xtmp3,
8936 tmp4, tmp5,
8937 n_tmp6);
8938 addq(in_out2, 2 * size);
8939 subl(in_out1, 3 * size);
8940 jmp(L_processPartitions);
8941
8942 bind(L_exit);
8943 }
8944
8945 // Algorithm 2: Pipelined usage of the CRC32 instruction.
8946 // Input: A buffer I of L bytes.
8947 // Output: the CRC32C value of the buffer.
8948 // Notations:
8949 // Write L = 24N + r, with N = floor (L/24).
8950 // r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation A|B|C|R, where A, B and C each consist of
// N quadwords and R consists of r bytes.
// A[j] = I[8j+7 : 8j],               j = 0, 1, ..., N-1
// B[j] = I[8N + 8j+7 : 8N + 8j],     j = 0, 1, ..., N-1
// C[j] = I[16N + 8j+7 : 16N + 8j],   j = 0, 1, ..., N-1
// if r > 0, R[j] = I[24N + j],       j = 0, 1, ..., r-1
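// Worked example (illustrative only): for L = 100 bytes, N = floor(100/24) = 4 and
// r = 100 - 24*4 = 4, so A covers bytes [0, 31], B bytes [32, 63], C bytes [64, 95]
// and R the trailing bytes [96, 99].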
8957 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
8958 Register tmp1, Register tmp2, Register tmp3,
8959 Register tmp4, Register tmp5, Register tmp6,
8960 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8961 bool is_pclmulqdq_supported) {
8962 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
8963 Label L_wordByWord;
8964 Label L_byteByByteProlog;
8965 Label L_byteByByte;
8966 Label L_exit;
8967
8968 if (is_pclmulqdq_supported ) {
8969 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
8970 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
8971
8972 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
8973 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
8974
8975 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
8976 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
8977 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
8978 } else {
8979 const_or_pre_comp_const_index[0] = 1;
8980 const_or_pre_comp_const_index[1] = 0;
8981
8982 const_or_pre_comp_const_index[2] = 3;
8983 const_or_pre_comp_const_index[3] = 2;
8984
8985 const_or_pre_comp_const_index[4] = 5;
8986 const_or_pre_comp_const_index[5] = 4;
8987 }
8988 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
8989 in2, in1, in_out,
8990 tmp1, tmp2, tmp3,
8991 w_xtmp1, w_xtmp2, w_xtmp3,
8992 tmp4, tmp5,
8993 tmp6);
8994 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
8995 in2, in1, in_out,
8996 tmp1, tmp2, tmp3,
8997 w_xtmp1, w_xtmp2, w_xtmp3,
8998 tmp4, tmp5,
8999 tmp6);
9000 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
9001 in2, in1, in_out,
9002 tmp1, tmp2, tmp3,
9003 w_xtmp1, w_xtmp2, w_xtmp3,
9004 tmp4, tmp5,
9005 tmp6);
9006 movl(tmp1, in2);
9007 andl(tmp1, 0x00000007);
9008 negl(tmp1);
9009 addl(tmp1, in2);
9010 addq(tmp1, in1);
9011
9012 cmpq(in1, tmp1);
9013 jccb(Assembler::greaterEqual, L_byteByByteProlog);
9014 align(16);
9015 BIND(L_wordByWord);
9016 crc32(in_out, Address(in1, 0), 8);
9017 addq(in1, 8);
9018 cmpq(in1, tmp1);
9019 jcc(Assembler::less, L_wordByWord);
9020
9021 BIND(L_byteByByteProlog);
9022 andl(in2, 0x00000007);
9023 movl(tmp2, 1);
9024
9025 cmpl(tmp2, in2);
9026 jccb(Assembler::greater, L_exit);
9027 BIND(L_byteByByte);
9028 crc32(in_out, Address(in1, 0), 1);
9029 incq(in1);
9030 incl(tmp2);
9031 cmpl(tmp2, in2);
9032 jcc(Assembler::lessEqual, L_byteByByte);
9033
9034 BIND(L_exit);
9035 }
9036 #undef BIND
9037 #undef BLOCK_COMMENT
9038
9039 // Compress char[] array to byte[].
9040 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
9041 // Return the array length if every element in array can be encoded,
9042 // otherwise, the index of first non-latin1 (> 0xff) character.
9043 // @IntrinsicCandidate
9044 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
9045 // for (int i = 0; i < len; i++) {
9046 // char c = src[srcOff];
9047 // if (c > 0xff) {
9048 // return i; // return index of non-latin1 char
9049 // }
9050 // dst[dstOff] = (byte)c;
9051 // srcOff++;
9052 // dstOff++;
9053 // }
9054 // return len;
9055 // }
9056 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
9057 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
9058 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
9059 Register tmp5, Register result, KRegister mask1, KRegister mask2) {
9060 Label copy_chars_loop, done, reset_sp, copy_tail;
9061
9062 // rsi: src
9063 // rdi: dst
9064 // rdx: len
9065 // rcx: tmp5
9066 // rax: result
9067
9068 // rsi holds start addr of source char[] to be compressed
9069 // rdi holds start addr of destination byte[]
9070 // rdx holds length
9071
9072 assert(len != result, "");
9073
9074 // save length for return
9075 movl(result, len);
9076
9077 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
9078 VM_Version::supports_avx512vlbw() &&
9079 VM_Version::supports_bmi2()) {
9080
9081 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
9082
9083 // alignment
9084 Label post_alignment;
9085
// if the length of the string is less than 32, handle it the old-fashioned way
9087 testl(len, -32);
9088 jcc(Assembler::zero, below_threshold);
9089
9090 // First check whether a character is compressible ( <= 0xFF).
9091 // Create mask to test for Unicode chars inside zmm vector
9092 movl(tmp5, 0x00FF);
9093 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
9094
9095 testl(len, -64);
9096 jccb(Assembler::zero, post_alignment);
9097
9098 movl(tmp5, dst);
9099 andl(tmp5, (32 - 1));
9100 negl(tmp5);
9101 andl(tmp5, (32 - 1));
9102
9103 // bail out when there is nothing to be done
9104 testl(tmp5, 0xFFFFFFFF);
9105 jccb(Assembler::zero, post_alignment);
9106
9107 // ~(~0 << len), where len is the # of remaining elements to process
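// (illustrative example: an alignment count of 5 in tmp5 yields the mask 0x0000001F,
//  i.e. only the five lowest lanes enabled)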
9108 movl(len, 0xFFFFFFFF);
9109 shlxl(len, len, tmp5);
9110 notl(len);
9111 kmovdl(mask2, len);
9112 movl(len, result);
9113
9114 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9115 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9116 ktestd(mask1, mask2);
9117 jcc(Assembler::carryClear, copy_tail);
9118
9119 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9120
9121 addptr(src, tmp5);
9122 addptr(src, tmp5);
9123 addptr(dst, tmp5);
9124 subl(len, tmp5);
9125
9126 bind(post_alignment);
9127 // end of alignment
9128
9129 movl(tmp5, len);
9130 andl(tmp5, (32 - 1)); // tail count (in chars)
9131 andl(len, ~(32 - 1)); // vector count (in chars)
9132 jccb(Assembler::zero, copy_loop_tail);
9133
9134 lea(src, Address(src, len, Address::times_2));
9135 lea(dst, Address(dst, len, Address::times_1));
9136 negptr(len);
9137
9138 bind(copy_32_loop);
9139 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
9140 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
9141 kortestdl(mask1, mask1);
9142 jccb(Assembler::carryClear, reset_for_copy_tail);
9143
// All elements in the currently processed chunk are valid candidates for
// compression. Write the truncated byte elements to memory.
9146 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
9147 addptr(len, 32);
9148 jccb(Assembler::notZero, copy_32_loop);
9149
9150 bind(copy_loop_tail);
9151 // bail out when there is nothing to be done
9152 testl(tmp5, 0xFFFFFFFF);
9153 jcc(Assembler::zero, done);
9154
9155 movl(len, tmp5);
9156
9157 // ~(~0 << len), where len is the # of remaining elements to process
9158 movl(tmp5, 0xFFFFFFFF);
9159 shlxl(tmp5, tmp5, len);
9160 notl(tmp5);
9161
9162 kmovdl(mask2, tmp5);
9163
9164 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
9165 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
9166 ktestd(mask1, mask2);
9167 jcc(Assembler::carryClear, copy_tail);
9168
9169 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
9170 jmp(done);
9171
9172 bind(reset_for_copy_tail);
9173 lea(src, Address(src, tmp5, Address::times_2));
9174 lea(dst, Address(dst, tmp5, Address::times_1));
9175 subptr(len, tmp5);
9176 jmp(copy_chars_loop);
9177
9178 bind(below_threshold);
9179 }
9180
9181 if (UseSSE42Intrinsics) {
9182 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
9183
9184 // vectored compression
9185 testl(len, 0xfffffff8);
9186 jcc(Assembler::zero, copy_tail);
9187
9188 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors
9189 movdl(tmp1Reg, tmp5);
9190 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg
9191
9192 andl(len, 0xfffffff0);
9193 jccb(Assembler::zero, copy_16);
9194
9195 // compress 16 chars per iter
9196 pxor(tmp4Reg, tmp4Reg);
9197
9198 lea(src, Address(src, len, Address::times_2));
9199 lea(dst, Address(dst, len, Address::times_1));
9200 negptr(len);
9201
9202 bind(copy_32_loop);
9203 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters
9204 por(tmp4Reg, tmp2Reg);
9205 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
9206 por(tmp4Reg, tmp3Reg);
9207 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector
9208 jccb(Assembler::notZero, reset_for_copy_tail);
packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
9210 movdqu(Address(dst, len, Address::times_1), tmp2Reg);
9211 addptr(len, 16);
9212 jccb(Assembler::notZero, copy_32_loop);
9213
9214 // compress next vector of 8 chars (if any)
9215 bind(copy_16);
9216 // len = 0
9217 testl(result, 0x00000008); // check if there's a block of 8 chars to compress
9218 jccb(Assembler::zero, copy_tail_sse);
9219
9220 pxor(tmp3Reg, tmp3Reg);
9221
9222 movdqu(tmp2Reg, Address(src, 0));
9223 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
9224 jccb(Assembler::notZero, reset_for_copy_tail);
9225 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
9226 movq(Address(dst, 0), tmp2Reg);
9227 addptr(src, 16);
9228 addptr(dst, 8);
9229 jmpb(copy_tail_sse);
9230
9231 bind(reset_for_copy_tail);
9232 movl(tmp5, result);
9233 andl(tmp5, 0x0000000f);
9234 lea(src, Address(src, tmp5, Address::times_2));
9235 lea(dst, Address(dst, tmp5, Address::times_1));
9236 subptr(len, tmp5);
9237 jmpb(copy_chars_loop);
9238
9239 bind(copy_tail_sse);
9240 movl(len, result);
9241 andl(len, 0x00000007); // tail count (in chars)
9242 }
9243 // compress 1 char per iter
9244 bind(copy_tail);
9245 testl(len, len);
9246 jccb(Assembler::zero, done);
9247 lea(src, Address(src, len, Address::times_2));
9248 lea(dst, Address(dst, len, Address::times_1));
9249 negptr(len);
9250
9251 bind(copy_chars_loop);
9252 load_unsigned_short(tmp5, Address(src, len, Address::times_2));
9253 testl(tmp5, 0xff00); // check if Unicode char
9254 jccb(Assembler::notZero, reset_sp);
movb(Address(dst, len, Address::times_1), tmp5); // LATIN1 char; compress to 1 byte
9256 increment(len);
9257 jccb(Assembler::notZero, copy_chars_loop);
9258
9259 // add len then return (len will be zero if compress succeeded, otherwise negative)
9260 bind(reset_sp);
9261 addl(result, len);
9262
9263 bind(done);
9264 }
9265
9266 // Inflate byte[] array to char[].
9267 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
9268 // @IntrinsicCandidate
9269 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
9270 // for (int i = 0; i < len; i++) {
9271 // dst[dstOff++] = (char)(src[srcOff++] & 0xff);
9272 // }
9273 // }
9274 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
9275 XMMRegister tmp1, Register tmp2, KRegister mask) {
9276 Label copy_chars_loop, done, below_threshold, avx3_threshold;
9277 // rsi: src
9278 // rdi: dst
9279 // rdx: len
9280 // rcx: tmp2
9281
9282 // rsi holds start addr of source byte[] to be inflated
9283 // rdi holds start addr of destination char[]
9284 // rdx holds length
9285 assert_different_registers(src, dst, len, tmp2);
9286 movl(tmp2, len);
9287 if ((UseAVX > 2) && // AVX512
9288 VM_Version::supports_avx512vlbw() &&
9289 VM_Version::supports_bmi2()) {
9290
9291 Label copy_32_loop, copy_tail;
9292 Register tmp3_aliased = len;
9293
// if the length of the string is less than 16, handle it the old-fashioned way
9295 testl(len, -16);
9296 jcc(Assembler::zero, below_threshold);
9297
9298 testl(len, -1 * AVX3Threshold);
9299 jcc(Assembler::zero, avx3_threshold);
9300
9301 // In order to use only one arithmetic operation for the main loop we use
9302 // this pre-calculation
9303 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
9304 andl(len, -32); // vector count
9305 jccb(Assembler::zero, copy_tail);
9306
9307 lea(src, Address(src, len, Address::times_1));
9308 lea(dst, Address(dst, len, Address::times_2));
9309 negptr(len);
9310
9311
9312 // inflate 32 chars per iter
9313 bind(copy_32_loop);
9314 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
9315 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
9316 addptr(len, 32);
9317 jcc(Assembler::notZero, copy_32_loop);
9318
9319 bind(copy_tail);
9320 // bail out when there is nothing to be done
9321 testl(tmp2, -1); // we don't destroy the contents of tmp2 here
9322 jcc(Assembler::zero, done);
9323
9324 // ~(~0 << length), where length is the # of remaining elements to process
9325 movl(tmp3_aliased, -1);
9326 shlxl(tmp3_aliased, tmp3_aliased, tmp2);
9327 notl(tmp3_aliased);
9328 kmovdl(mask, tmp3_aliased);
9329 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
9330 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
9331
9332 jmp(done);
9333 bind(avx3_threshold);
9334 }
9335 if (UseSSE42Intrinsics) {
9336 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
9337
9338 if (UseAVX > 1) {
9339 andl(tmp2, (16 - 1));
9340 andl(len, -16);
9341 jccb(Assembler::zero, copy_new_tail);
9342 } else {
9343 andl(tmp2, 0x00000007); // tail count (in chars)
9344 andl(len, 0xfffffff8); // vector count (in chars)
9345 jccb(Assembler::zero, copy_tail);
9346 }
9347
9348 // vectored inflation
9349 lea(src, Address(src, len, Address::times_1));
9350 lea(dst, Address(dst, len, Address::times_2));
9351 negptr(len);
9352
9353 if (UseAVX > 1) {
9354 bind(copy_16_loop);
9355 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
9356 vmovdqu(Address(dst, len, Address::times_2), tmp1);
9357 addptr(len, 16);
9358 jcc(Assembler::notZero, copy_16_loop);
9359
9360 bind(below_threshold);
9361 bind(copy_new_tail);
9362 movl(len, tmp2);
9363 andl(tmp2, 0x00000007);
9364 andl(len, 0xFFFFFFF8);
9365 jccb(Assembler::zero, copy_tail);
9366
9367 pmovzxbw(tmp1, Address(src, 0));
9368 movdqu(Address(dst, 0), tmp1);
9369 addptr(src, 8);
9370 addptr(dst, 2 * 8);
9371
9372 jmp(copy_tail, true);
9373 }
9374
9375 // inflate 8 chars per iter
9376 bind(copy_8_loop);
9377 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words
9378 movdqu(Address(dst, len, Address::times_2), tmp1);
9379 addptr(len, 8);
9380 jcc(Assembler::notZero, copy_8_loop);
9381
9382 bind(copy_tail);
9383 movl(len, tmp2);
9384
9385 cmpl(len, 4);
9386 jccb(Assembler::less, copy_bytes);
9387
9388 movdl(tmp1, Address(src, 0)); // load 4 byte chars
9389 pmovzxbw(tmp1, tmp1);
9390 movq(Address(dst, 0), tmp1);
9391 subptr(len, 4);
9392 addptr(src, 4);
9393 addptr(dst, 8);
9394
9395 bind(copy_bytes);
9396 } else {
9397 bind(below_threshold);
9398 }
9399
9400 testl(len, len);
9401 jccb(Assembler::zero, done);
9402 lea(src, Address(src, len, Address::times_1));
9403 lea(dst, Address(dst, len, Address::times_2));
9404 negptr(len);
9405
9406 // inflate 1 char per iter
9407 bind(copy_chars_loop);
9408 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char
9409 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word
9410 increment(len);
9411 jcc(Assembler::notZero, copy_chars_loop);
9412
9413 bind(done);
9414 }
9415
9416 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
9417 switch(type) {
9418 case T_BYTE:
9419 case T_BOOLEAN:
9420 evmovdqub(dst, kmask, src, merge, vector_len);
9421 break;
9422 case T_CHAR:
9423 case T_SHORT:
9424 evmovdquw(dst, kmask, src, merge, vector_len);
9425 break;
9426 case T_INT:
9427 case T_FLOAT:
9428 evmovdqul(dst, kmask, src, merge, vector_len);
9429 break;
9430 case T_LONG:
9431 case T_DOUBLE:
9432 evmovdquq(dst, kmask, src, merge, vector_len);
9433 break;
9434 default:
9435 fatal("Unexpected type argument %s", type2name(type));
9436 break;
9437 }
9438 }
9439
9440
9441 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
9442 switch(type) {
9443 case T_BYTE:
9444 case T_BOOLEAN:
9445 evmovdqub(dst, kmask, src, merge, vector_len);
9446 break;
9447 case T_CHAR:
9448 case T_SHORT:
9449 evmovdquw(dst, kmask, src, merge, vector_len);
9450 break;
9451 case T_INT:
9452 case T_FLOAT:
9453 evmovdqul(dst, kmask, src, merge, vector_len);
9454 break;
9455 case T_LONG:
9456 case T_DOUBLE:
9457 evmovdquq(dst, kmask, src, merge, vector_len);
9458 break;
9459 default:
9460 fatal("Unexpected type argument %s", type2name(type));
9461 break;
9462 }
9463 }
9464
9465 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
9466 switch(type) {
9467 case T_BYTE:
9468 case T_BOOLEAN:
9469 evmovdqub(dst, kmask, src, merge, vector_len);
9470 break;
9471 case T_CHAR:
9472 case T_SHORT:
9473 evmovdquw(dst, kmask, src, merge, vector_len);
9474 break;
9475 case T_INT:
9476 case T_FLOAT:
9477 evmovdqul(dst, kmask, src, merge, vector_len);
9478 break;
9479 case T_LONG:
9480 case T_DOUBLE:
9481 evmovdquq(dst, kmask, src, merge, vector_len);
9482 break;
9483 default:
9484 fatal("Unexpected type argument %s", type2name(type));
9485 break;
9486 }
9487 }
9488
9489 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
9490 switch(masklen) {
9491 case 2:
9492 knotbl(dst, src);
9493 movl(rtmp, 3);
9494 kmovbl(ktmp, rtmp);
9495 kandbl(dst, ktmp, dst);
9496 break;
9497 case 4:
9498 knotbl(dst, src);
9499 movl(rtmp, 15);
9500 kmovbl(ktmp, rtmp);
9501 kandbl(dst, ktmp, dst);
9502 break;
9503 case 8:
9504 knotbl(dst, src);
9505 break;
9506 case 16:
9507 knotwl(dst, src);
9508 break;
9509 case 32:
9510 knotdl(dst, src);
9511 break;
9512 case 64:
9513 knotql(dst, src);
9514 break;
9515 default:
9516 fatal("Unexpected vector length %d", masklen);
9517 break;
9518 }
9519 }
9520
9521 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9522 switch(type) {
9523 case T_BOOLEAN:
9524 case T_BYTE:
9525 kandbl(dst, src1, src2);
9526 break;
9527 case T_CHAR:
9528 case T_SHORT:
9529 kandwl(dst, src1, src2);
9530 break;
9531 case T_INT:
9532 case T_FLOAT:
9533 kanddl(dst, src1, src2);
9534 break;
9535 case T_LONG:
9536 case T_DOUBLE:
9537 kandql(dst, src1, src2);
9538 break;
9539 default:
9540 fatal("Unexpected type argument %s", type2name(type));
9541 break;
9542 }
9543 }
9544
9545 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9546 switch(type) {
9547 case T_BOOLEAN:
9548 case T_BYTE:
9549 korbl(dst, src1, src2);
9550 break;
9551 case T_CHAR:
9552 case T_SHORT:
9553 korwl(dst, src1, src2);
9554 break;
9555 case T_INT:
9556 case T_FLOAT:
9557 kordl(dst, src1, src2);
9558 break;
9559 case T_LONG:
9560 case T_DOUBLE:
9561 korql(dst, src1, src2);
9562 break;
9563 default:
9564 fatal("Unexpected type argument %s", type2name(type));
9565 break;
9566 }
9567 }
9568
9569 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
9570 switch(type) {
9571 case T_BOOLEAN:
9572 case T_BYTE:
9573 kxorbl(dst, src1, src2);
9574 break;
9575 case T_CHAR:
9576 case T_SHORT:
9577 kxorwl(dst, src1, src2);
9578 break;
9579 case T_INT:
9580 case T_FLOAT:
9581 kxordl(dst, src1, src2);
9582 break;
9583 case T_LONG:
9584 case T_DOUBLE:
9585 kxorql(dst, src1, src2);
9586 break;
9587 default:
9588 fatal("Unexpected type argument %s", type2name(type));
9589 break;
9590 }
9591 }
9592
9593 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9594 switch(type) {
9595 case T_BOOLEAN:
9596 case T_BYTE:
9597 evpermb(dst, mask, nds, src, merge, vector_len); break;
9598 case T_CHAR:
9599 case T_SHORT:
9600 evpermw(dst, mask, nds, src, merge, vector_len); break;
9601 case T_INT:
9602 case T_FLOAT:
9603 evpermd(dst, mask, nds, src, merge, vector_len); break;
9604 case T_LONG:
9605 case T_DOUBLE:
9606 evpermq(dst, mask, nds, src, merge, vector_len); break;
9607 default:
9608 fatal("Unexpected type argument %s", type2name(type)); break;
9609 }
9610 }
9611
9612 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9613 switch(type) {
9614 case T_BOOLEAN:
9615 case T_BYTE:
9616 evpermb(dst, mask, nds, src, merge, vector_len); break;
9617 case T_CHAR:
9618 case T_SHORT:
9619 evpermw(dst, mask, nds, src, merge, vector_len); break;
9620 case T_INT:
9621 case T_FLOAT:
9622 evpermd(dst, mask, nds, src, merge, vector_len); break;
9623 case T_LONG:
9624 case T_DOUBLE:
9625 evpermq(dst, mask, nds, src, merge, vector_len); break;
9626 default:
9627 fatal("Unexpected type argument %s", type2name(type)); break;
9628 }
9629 }
9630
9631 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9632 switch(type) {
9633 case T_BYTE:
9634 evpminub(dst, mask, nds, src, merge, vector_len); break;
9635 case T_SHORT:
9636 evpminuw(dst, mask, nds, src, merge, vector_len); break;
9637 case T_INT:
9638 evpminud(dst, mask, nds, src, merge, vector_len); break;
9639 case T_LONG:
9640 evpminuq(dst, mask, nds, src, merge, vector_len); break;
9641 default:
9642 fatal("Unexpected type argument %s", type2name(type)); break;
9643 }
9644 }
9645
9646 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9647 switch(type) {
9648 case T_BYTE:
9649 evpmaxub(dst, mask, nds, src, merge, vector_len); break;
9650 case T_SHORT:
9651 evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
9652 case T_INT:
9653 evpmaxud(dst, mask, nds, src, merge, vector_len); break;
9654 case T_LONG:
9655 evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
9656 default:
9657 fatal("Unexpected type argument %s", type2name(type)); break;
9658 }
9659 }
9660
9661 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9662 switch(type) {
9663 case T_BYTE:
9664 evpminub(dst, mask, nds, src, merge, vector_len); break;
9665 case T_SHORT:
9666 evpminuw(dst, mask, nds, src, merge, vector_len); break;
9667 case T_INT:
9668 evpminud(dst, mask, nds, src, merge, vector_len); break;
9669 case T_LONG:
9670 evpminuq(dst, mask, nds, src, merge, vector_len); break;
9671 default:
9672 fatal("Unexpected type argument %s", type2name(type)); break;
9673 }
9674 }
9675
9676 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9677 switch(type) {
9678 case T_BYTE:
9679 evpmaxub(dst, mask, nds, src, merge, vector_len); break;
9680 case T_SHORT:
9681 evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
9682 case T_INT:
9683 evpmaxud(dst, mask, nds, src, merge, vector_len); break;
9684 case T_LONG:
9685 evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
9686 default:
9687 fatal("Unexpected type argument %s", type2name(type)); break;
9688 }
9689 }
9690
9691 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9692 switch(type) {
9693 case T_BYTE:
9694 evpminsb(dst, mask, nds, src, merge, vector_len); break;
9695 case T_SHORT:
9696 evpminsw(dst, mask, nds, src, merge, vector_len); break;
9697 case T_INT:
9698 evpminsd(dst, mask, nds, src, merge, vector_len); break;
9699 case T_LONG:
9700 evpminsq(dst, mask, nds, src, merge, vector_len); break;
9701 case T_FLOAT:
9702 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9703 case T_DOUBLE:
9704 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9705 default:
9706 fatal("Unexpected type argument %s", type2name(type)); break;
9707 }
9708 }
9709
9710 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9711 switch(type) {
9712 case T_BYTE:
9713 evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
9714 case T_SHORT:
9715 evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
9716 case T_INT:
9717 evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
9718 case T_LONG:
9719 evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
9720 case T_FLOAT:
9721 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9722 case T_DOUBLE:
9723 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9724 default:
9725 fatal("Unexpected type argument %s", type2name(type)); break;
9726 }
9727 }
9728
9729 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9730 switch(type) {
9731 case T_BYTE:
9732 evpminsb(dst, mask, nds, src, merge, vector_len); break;
9733 case T_SHORT:
9734 evpminsw(dst, mask, nds, src, merge, vector_len); break;
9735 case T_INT:
9736 evpminsd(dst, mask, nds, src, merge, vector_len); break;
9737 case T_LONG:
9738 evpminsq(dst, mask, nds, src, merge, vector_len); break;
9739 case T_FLOAT:
9740 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9741 case T_DOUBLE:
9742 evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
9743 default:
9744 fatal("Unexpected type argument %s", type2name(type)); break;
9745 }
9746 }
9747
9748 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9749 switch(type) {
9750 case T_BYTE:
9751 evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
9752 case T_SHORT:
9753 evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
9754 case T_INT:
9755 evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
9756 case T_LONG:
9757 evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
9758 case T_FLOAT:
9759 evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9760 case T_DOUBLE:
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
9762 default:
9763 fatal("Unexpected type argument %s", type2name(type)); break;
9764 }
9765 }
9766
9767 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9768 switch(type) {
9769 case T_INT:
9770 evpxord(dst, mask, nds, src, merge, vector_len); break;
9771 case T_LONG:
9772 evpxorq(dst, mask, nds, src, merge, vector_len); break;
9773 default:
9774 fatal("Unexpected type argument %s", type2name(type)); break;
9775 }
9776 }
9777
9778 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9779 switch(type) {
9780 case T_INT:
9781 evpxord(dst, mask, nds, src, merge, vector_len); break;
9782 case T_LONG:
9783 evpxorq(dst, mask, nds, src, merge, vector_len); break;
9784 default:
9785 fatal("Unexpected type argument %s", type2name(type)); break;
9786 }
9787 }
9788
9789 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9790 switch(type) {
9791 case T_INT:
9792 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
9793 case T_LONG:
9794 evporq(dst, mask, nds, src, merge, vector_len); break;
9795 default:
9796 fatal("Unexpected type argument %s", type2name(type)); break;
9797 }
9798 }
9799
9800 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9801 switch(type) {
9802 case T_INT:
9803 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
9804 case T_LONG:
9805 evporq(dst, mask, nds, src, merge, vector_len); break;
9806 default:
9807 fatal("Unexpected type argument %s", type2name(type)); break;
9808 }
9809 }
9810
9811 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9812 switch(type) {
9813 case T_INT:
9814 evpandd(dst, mask, nds, src, merge, vector_len); break;
9815 case T_LONG:
9816 evpandq(dst, mask, nds, src, merge, vector_len); break;
9817 default:
9818 fatal("Unexpected type argument %s", type2name(type)); break;
9819 }
9820 }
9821
9822 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9823 switch(type) {
9824 case T_INT:
9825 evpandd(dst, mask, nds, src, merge, vector_len); break;
9826 case T_LONG:
9827 evpandq(dst, mask, nds, src, merge, vector_len); break;
9828 default:
9829 fatal("Unexpected type argument %s", type2name(type)); break;
9830 }
9831 }
9832
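// kortest/ktest dispatch on the mask length in bits and emit the byte/word/dword/qword
// form of KORTEST/KTEST, which set the flags from the two opmask registers (for example
// ZF when the OR, respectively AND, of the operands is all zero).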
9833 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
9834 switch(masklen) {
9835 case 8:
9836 kortestbl(src1, src2);
9837 break;
9838 case 16:
9839 kortestwl(src1, src2);
9840 break;
9841 case 32:
9842 kortestdl(src1, src2);
9843 break;
9844 case 64:
9845 kortestql(src1, src2);
9846 break;
9847 default:
9848 fatal("Unexpected mask length %d", masklen);
9849 break;
9850 }
9851 }
9852
9853
9854 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
9855 switch(masklen) {
9856 case 8:
9857 ktestbl(src1, src2);
9858 break;
9859 case 16:
9860 ktestwl(src1, src2);
9861 break;
9862 case 32:
9863 ktestdl(src1, src2);
9864 break;
9865 case 64:
9866 ktestql(src1, src2);
9867 break;
9868 default:
9869 fatal("Unexpected mask length %d", masklen);
9870 break;
9871 }
9872 }
9873
9874 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
9875 switch(type) {
9876 case T_INT:
9877 evprold(dst, mask, src, shift, merge, vlen_enc); break;
9878 case T_LONG:
9879 evprolq(dst, mask, src, shift, merge, vlen_enc); break;
9880 default:
9881 fatal("Unexpected type argument %s", type2name(type)); break;
9883 }
9884 }
9885
9886 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
9887 switch(type) {
9888 case T_INT:
9889 evprord(dst, mask, src, shift, merge, vlen_enc); break;
9890 case T_LONG:
9891 evprorq(dst, mask, src, shift, merge, vlen_enc); break;
9892 default:
9893 fatal("Unexpected type argument %s", type2name(type)); break;
9894 }
9895 }
9896
9897 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
9898 switch(type) {
9899 case T_INT:
9900 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
9901 case T_LONG:
9902 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
9903 default:
9904 fatal("Unexpected type argument %s", type2name(type)); break;
9905 }
9906 }
9907
9908 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
9909 switch(type) {
9910 case T_INT:
9911 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
9912 case T_LONG:
9913 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
9914 default:
9915 fatal("Unexpected type argument %s", type2name(type)); break;
9916 }
9917 }
9918
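// AddressLiteral variants of the vector instructions below: when the literal is
// RIP-reachable it is used directly as the memory operand; otherwise its address is
// materialized into rscratch (which must be supplied in that case) and used indirectly.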
9919 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
9920 assert(rscratch != noreg || always_reachable(src), "missing");
9921
9922 if (reachable(src)) {
9923 evpandq(dst, nds, as_Address(src), vector_len);
9924 } else {
9925 lea(rscratch, src);
9926 evpandq(dst, nds, Address(rscratch, 0), vector_len);
9927 }
9928 }
9929
9930 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
9931 assert(rscratch != noreg || always_reachable(src), "missing");
9932
9933 if (reachable(src)) {
9934 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
9935 } else {
9936 lea(rscratch, src);
9937 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
9938 }
9939 }
9940
9941 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
9942 assert(rscratch != noreg || always_reachable(src), "missing");
9943
9944 if (reachable(src)) {
9945 evporq(dst, nds, as_Address(src), vector_len);
9946 } else {
9947 lea(rscratch, src);
9948 evporq(dst, nds, Address(rscratch, 0), vector_len);
9949 }
9950 }
9951
9952 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
9953 assert(rscratch != noreg || always_reachable(src), "missing");
9954
9955 if (reachable(src)) {
9956 vpshufb(dst, nds, as_Address(src), vector_len);
9957 } else {
9958 lea(rscratch, src);
9959 vpshufb(dst, nds, Address(rscratch, 0), vector_len);
9960 }
9961 }
9962
9963 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
9964 assert(rscratch != noreg || always_reachable(src), "missing");
9965
9966 if (reachable(src)) {
9967 Assembler::vpor(dst, nds, as_Address(src), vector_len);
9968 } else {
9969 lea(rscratch, src);
9970 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
9971 }
9972 }
9973
9974 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
9975 assert(rscratch != noreg || always_reachable(src3), "missing");
9976
9977 if (reachable(src3)) {
9978 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
9979 } else {
9980 lea(rscratch, src3);
9981 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
9982 }
9983 }
9984
9985 #if COMPILER2_OR_JVMCI
9986
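// Stores the low `length` elements of `xmm` to `dst` under an opmask: the mask is
// built by keeping the low `length` bits of an all-ones value (BZHI) and moving it
// into the k-register `mask` before the predicated vector store.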
9987 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
9988 Register length, Register temp, int vec_enc) {
9989 // Computing mask for predicated vector store.
9990 movptr(temp, -1);
9991 bzhiq(temp, temp, length);
9992 kmov(mask, temp);
9993 evmovdqu(bt, mask, dst, xmm, true, vec_enc);
9994 }
9995
9996 // Masked memory set (fill) for lengths of less than 64 bytes.
9997 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
9998 XMMRegister xmm, KRegister mask, Register length,
9999 Register temp, bool use64byteVector) {
10000 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10001 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
10002 if (!use64byteVector) {
10003 fill32(dst, disp, xmm);
10004 subptr(length, 32 >> shift);
10005 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
10006 } else {
10007 assert(MaxVectorSize == 64, "vector length != 64");
10008 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
10009 }
10010 }
10011
10012
10013 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
10014 XMMRegister xmm, KRegister mask, Register length,
10015 Register temp) {
10016 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10017 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
10018 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
10019 }
10020
10021
10022 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
10023 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10024 vmovdqu(dst, xmm);
10025 }
10026
10027 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
10028 fill32(Address(dst, disp), xmm);
10029 }
10030
10031 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
10032 assert(MaxVectorSize >= 32, "vector length should be >= 32");
10033 if (!use64byteVector) {
10034 fill32(dst, xmm);
10035 fill32(dst.plus_disp(32), xmm);
10036 } else {
10037 evmovdquq(dst, xmm, Assembler::AVX_512bit);
10038 }
10039 }
10040
10041 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
10042 fill64(Address(dst, disp), xmm, use64byteVector);
10043 }
10044
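// Fills `count` elements of the given type at `to` with `value`. Depending on
// MaxVectorSize and the AVX3 threshold this uses either a 32-byte (ymm) sequence with
// a 128-byte main loop or a 64-byte (zmm) sequence with a 192-byte main loop; partial
// tails and the alignment prologue are written with masked stores through k2.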
10045 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
10046 Register count, Register rtmp, XMMRegister xtmp) {
10047 Label L_exit;
10048 Label L_fill_start;
10049 Label L_fill_64_bytes;
10050 Label L_fill_96_bytes;
10051 Label L_fill_128_bytes;
10052 Label L_fill_128_bytes_loop;
10053 Label L_fill_128_loop_header;
10054 Label L_fill_128_bytes_loop_header;
10055 Label L_fill_128_bytes_loop_pre_header;
10056 Label L_fill_zmm_sequence;
10057
10058 int shift = -1;
10059 int avx3threshold = VM_Version::avx3_threshold();
10060 switch(type) {
10061 case T_BYTE: shift = 0;
10062 break;
10063 case T_SHORT: shift = 1;
10064 break;
10065 case T_INT: shift = 2;
10066 break;
10067 /* Uncomment when LONG fill stubs are supported.
10068 case T_LONG: shift = 3;
10069 break;
10070 */
10071 default:
10072 fatal("Unhandled type: %s\n", type2name(type));
10073 }
10074
10075 if ((avx3threshold != 0) || (MaxVectorSize == 32)) {
10076
10077 if (MaxVectorSize == 64) {
10078 cmpq(count, avx3threshold >> shift);
10079 jcc(Assembler::greater, L_fill_zmm_sequence);
10080 }
10081
10082 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
10083
10084 bind(L_fill_start);
10085
10086 cmpq(count, 32 >> shift);
10087 jccb(Assembler::greater, L_fill_64_bytes);
10088 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
10089 jmp(L_exit);
10090
10091 bind(L_fill_64_bytes);
10092 cmpq(count, 64 >> shift);
10093 jccb(Assembler::greater, L_fill_96_bytes);
10094 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
10095 jmp(L_exit);
10096
10097 bind(L_fill_96_bytes);
10098 cmpq(count, 96 >> shift);
10099 jccb(Assembler::greater, L_fill_128_bytes);
10100 fill64(to, 0, xtmp);
10101 subq(count, 64 >> shift);
10102 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
10103 jmp(L_exit);
10104
10105 bind(L_fill_128_bytes);
10106 cmpq(count, 128 >> shift);
10107 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
10108 fill64(to, 0, xtmp);
10109 fill32(to, 64, xtmp);
10110 subq(count, 96 >> shift);
10111 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
10112 jmp(L_exit);
10113
10114 bind(L_fill_128_bytes_loop_pre_header);
10115 {
10116 mov(rtmp, to);
10117 andq(rtmp, 31);
10118 jccb(Assembler::zero, L_fill_128_bytes_loop_header);
10119 negq(rtmp);
10120 addq(rtmp, 32);
10121 mov64(r8, -1L);
10122 bzhiq(r8, r8, rtmp);
10123 kmovql(k2, r8);
10124 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
10125 addq(to, rtmp);
10126 shrq(rtmp, shift);
10127 subq(count, rtmp);
10128 }
10129
10130 cmpq(count, 128 >> shift);
10131 jcc(Assembler::less, L_fill_start);
10132
10133 bind(L_fill_128_bytes_loop_header);
10134 subq(count, 128 >> shift);
10135
10136 align32();
10137 bind(L_fill_128_bytes_loop);
10138 fill64(to, 0, xtmp);
10139 fill64(to, 64, xtmp);
10140 addq(to, 128);
10141 subq(count, 128 >> shift);
10142 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
10143
10144 addq(count, 128 >> shift);
10145 jcc(Assembler::zero, L_exit);
10146 jmp(L_fill_start);
10147 }
10148
10149 if (MaxVectorSize == 64) {
10150 // Sequence using 64 byte ZMM register.
10151 Label L_fill_128_bytes_zmm;
10152 Label L_fill_192_bytes_zmm;
10153 Label L_fill_192_bytes_loop_zmm;
10154 Label L_fill_192_bytes_loop_header_zmm;
10155 Label L_fill_192_bytes_loop_pre_header_zmm;
10156 Label L_fill_start_zmm_sequence;
10157
10158 bind(L_fill_zmm_sequence);
10159 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
10160
10161 bind(L_fill_start_zmm_sequence);
10162 cmpq(count, 64 >> shift);
10163 jccb(Assembler::greater, L_fill_128_bytes_zmm);
10164 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
10165 jmp(L_exit);
10166
10167 bind(L_fill_128_bytes_zmm);
10168 cmpq(count, 128 >> shift);
10169 jccb(Assembler::greater, L_fill_192_bytes_zmm);
10170 fill64(to, 0, xtmp, true);
10171 subq(count, 64 >> shift);
10172 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
10173 jmp(L_exit);
10174
10175 bind(L_fill_192_bytes_zmm);
10176 cmpq(count, 192 >> shift);
10177 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
10178 fill64(to, 0, xtmp, true);
10179 fill64(to, 64, xtmp, true);
10180 subq(count, 128 >> shift);
10181 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
10182 jmp(L_exit);
10183
10184 bind(L_fill_192_bytes_loop_pre_header_zmm);
10185 {
10186 movq(rtmp, to);
10187 andq(rtmp, 63);
10188 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
10189 negq(rtmp);
10190 addq(rtmp, 64);
10191 mov64(r8, -1L);
10192 bzhiq(r8, r8, rtmp);
10193 kmovql(k2, r8);
10194 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
10195 addq(to, rtmp);
10196 shrq(rtmp, shift);
10197 subq(count, rtmp);
10198 }
10199
10200 cmpq(count, 192 >> shift);
10201 jcc(Assembler::less, L_fill_start_zmm_sequence);
10202
10203 bind(L_fill_192_bytes_loop_header_zmm);
10204 subq(count, 192 >> shift);
10205
10206 align32();
10207 bind(L_fill_192_bytes_loop_zmm);
10208 fill64(to, 0, xtmp, true);
10209 fill64(to, 64, xtmp, true);
10210 fill64(to, 128, xtmp, true);
10211 addq(to, 192);
10212 subq(count, 192 >> shift);
10213 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
10214
10215 addq(count, 192 >> shift);
10216 jcc(Assembler::zero, L_exit);
10217 jmp(L_fill_start_zmm_sequence);
10218 }
10219 bind(L_exit);
10220 }
10221 #endif //COMPILER2_OR_JVMCI
10222
10223
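// Float/double to int/long conversion helpers. CVTTSS2SI/CVTTSD2SI return the
// "integer indefinite" value (0x80000000 or 0x8000000000000000) for NaN and
// out-of-range inputs, which does not always match the JLS-specified result, so that
// value is detected and the matching fixup stub is called with the original
// floating-point operand spilled to the stack.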
10224 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
10225 Label done;
10226 cvttss2sil(dst, src);
10227 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10228 cmpl(dst, 0x80000000); // float_sign_flip
10229 jccb(Assembler::notEqual, done);
10230 subptr(rsp, 8);
10231 movflt(Address(rsp, 0), src);
10232 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
10233 pop(dst);
10234 bind(done);
10235 }
10236
10237 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
10238 Label done;
10239 cvttsd2sil(dst, src);
10240 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10241 cmpl(dst, 0x80000000); // float_sign_flip
10242 jccb(Assembler::notEqual, done);
10243 subptr(rsp, 8);
10244 movdbl(Address(rsp, 0), src);
10245 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
10246 pop(dst);
10247 bind(done);
10248 }
10249
10250 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
10251 Label done;
10252 cvttss2siq(dst, src);
10253 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10254 jccb(Assembler::notEqual, done);
10255 subptr(rsp, 8);
10256 movflt(Address(rsp, 0), src);
10257 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
10258 pop(dst);
10259 bind(done);
10260 }
10261
10262 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10263 // The following code is a line-by-line assembly translation of the rounding algorithm.
10264 // Please refer to the java.lang.Math.round(float) algorithm for details.
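// Outline: extract the biased exponent, compute shift = (SIGNIFICAND_WIDTH - 2 +
// EXP_BIAS) - exponent (kept in rcx for the variable shift below). If the shift is
// outside [0, 31] fall back to convert_f2i; otherwise rebuild the significand with
// its implicit leading bit, apply the sign, shift right arithmetically by `shift`,
// then add 1 and shift right once more to round half up.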
10265 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
10266 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
10267 const int32_t FloatConsts_EXP_BIAS = 127;
10268 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
10269 const int32_t MINUS_32 = 0xFFFFFFE0;
10270 Label L_special_case, L_block1, L_exit;
10271 movl(rtmp, FloatConsts_EXP_BIT_MASK);
10272 movdl(dst, src);
10273 andl(dst, rtmp);
10274 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
10275 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
10276 subl(rtmp, dst);
10277 movl(rcx, rtmp);
10278 movl(dst, MINUS_32);
10279 testl(rtmp, dst);
10280 jccb(Assembler::notEqual, L_special_case);
10281 movdl(dst, src);
10282 andl(dst, FloatConsts_SIGNIF_BIT_MASK);
10283 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
10284 movdl(rtmp, src);
10285 testl(rtmp, rtmp);
10286 jccb(Assembler::greaterEqual, L_block1);
10287 negl(dst);
10288 bind(L_block1);
10289 sarl(dst);
10290 addl(dst, 0x1);
10291 sarl(dst, 0x1);
10292 jmp(L_exit);
10293 bind(L_special_case);
10294 convert_f2i(dst, src);
10295 bind(L_exit);
10296 }
10297
10298 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10299 // The following code is a line-by-line assembly translation of the rounding algorithm.
10300 // Please refer to the java.lang.Math.round(double) algorithm for details.
10301 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
10302 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
10303 const int64_t DoubleConsts_EXP_BIAS = 1023;
10304 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
10305 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
10306 Label L_special_case, L_block1, L_exit;
10307 mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
10308 movq(dst, src);
10309 andq(dst, rtmp);
10310 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
10311 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
10312 subq(rtmp, dst);
10313 movq(rcx, rtmp);
10314 mov64(dst, MINUS_64);
10315 testq(rtmp, dst);
10316 jccb(Assembler::notEqual, L_special_case);
10317 movq(dst, src);
10318 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
10319 andq(dst, rtmp);
10320 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
10321 orq(dst, rtmp);
10322 movq(rtmp, src);
10323 testq(rtmp, rtmp);
10324 jccb(Assembler::greaterEqual, L_block1);
10325 negq(dst);
10326 bind(L_block1);
10327 sarq(dst);
10328 addq(dst, 0x1);
10329 sarq(dst, 0x1);
10330 jmp(L_exit);
10331 bind(L_special_case);
10332 convert_d2l(dst, src);
10333 bind(L_exit);
10334 }
10335
10336 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
10337 Label done;
10338 cvttsd2siq(dst, src);
10339 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10340 jccb(Assembler::notEqual, done);
10341 subptr(rsp, 8);
10342 movdbl(Address(rsp, 0), src);
10343 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
10344 pop(dst);
10345 bind(done);
10346 }
10347
10348 void MacroAssembler::cache_wb(Address line)
10349 {
10350 // 64-bit CPUs always support clflush.
10351 assert(VM_Version::supports_clflush(), "clflush should be available");
10352 bool optimized = VM_Version::supports_clflushopt();
10353 bool no_evict = VM_Version::supports_clwb();
10354
10355 // Prefer clwb (writeback without evict); otherwise
10356 // prefer clflushopt (potentially parallel writeback with evict);
10357 // otherwise fall back on clflush (serial writeback with evict).
10358
10359 if (optimized) {
10360 if (no_evict) {
10361 clwb(line);
10362 } else {
10363 clflushopt(line);
10364 }
10365 } else {
10366 // no need for fence when using CLFLUSH
10367 clflush(line);
10368 }
10369 }
10370
10371 void MacroAssembler::cache_wbsync(bool is_pre)
10372 {
10373 assert(VM_Version::supports_clflush(), "clflush should be available");
10374 bool optimized = VM_Version::supports_clflushopt();
10375 bool no_evict = VM_Version::supports_clwb();
10376
10377 // pick the correct implementation
10378
10379 if (!is_pre && (optimized || no_evict)) {
10380 // Need an sfence for the post-flush sync when using clflushopt or clwb;
10381 // otherwise no need for any synchronization.
10382
10383 sfence();
10384 }
10385 }
10386
10387 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
10388 switch (cond) {
10389 // Note some conditions are synonyms for others
10390 case Assembler::zero: return Assembler::notZero;
10391 case Assembler::notZero: return Assembler::zero;
10392 case Assembler::less: return Assembler::greaterEqual;
10393 case Assembler::lessEqual: return Assembler::greater;
10394 case Assembler::greater: return Assembler::lessEqual;
10395 case Assembler::greaterEqual: return Assembler::less;
10396 case Assembler::below: return Assembler::aboveEqual;
10397 case Assembler::belowEqual: return Assembler::above;
10398 case Assembler::above: return Assembler::belowEqual;
10399 case Assembler::aboveEqual: return Assembler::below;
10400 case Assembler::overflow: return Assembler::noOverflow;
10401 case Assembler::noOverflow: return Assembler::overflow;
10402 case Assembler::negative: return Assembler::positive;
10403 case Assembler::positive: return Assembler::negative;
10404 case Assembler::parity: return Assembler::noParity;
10405 case Assembler::noParity: return Assembler::parity;
10406 }
10407 ShouldNotReachHere(); return Assembler::overflow;
10408 }
10409
10410 // This is simply a call to Thread::current()
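// All argument/volatile integer registers are saved and restored around the call;
// the resulting Thread* ends up in `thread`.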
10411 void MacroAssembler::get_thread_slow(Register thread) {
10412 if (thread != rax) {
10413 push(rax);
10414 }
10415 push(rdi);
10416 push(rsi);
10417 push(rdx);
10418 push(rcx);
10419 push(r8);
10420 push(r9);
10421 push(r10);
10422 push(r11);
10423
10424 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
10425
10426 pop(r11);
10427 pop(r10);
10428 pop(r9);
10429 pop(r8);
10430 pop(rcx);
10431 pop(rdx);
10432 pop(rsi);
10433 pop(rdi);
10434 if (thread != rax) {
10435 mov(thread, rax);
10436 pop(rax);
10437 }
10438 }
10439
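// Verifies that (sp + bias) is aligned to 2 * wordSize (16 bytes) and stops with
// `msg` if it is not; `tmp` is only clobbered when a non-zero bias is supplied.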
10440 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
10441 Label L_stack_ok;
10442 if (bias == 0) {
10443 testptr(sp, 2 * wordSize - 1);
10444 } else {
10445 // lea(tmp, Address(rsp, bias));
10446 mov(tmp, sp);
10447 addptr(tmp, bias);
10448 testptr(tmp, 2 * wordSize - 1);
10449 }
10450 jcc(Assembler::equal, L_stack_ok);
10451 block_comment(msg);
10452 stop(msg);
10453 bind(L_stack_ok);
10454 }
10455
10456 // Implements fast-locking.
10457 //
10458 // basic_lock: the BasicObjectLock; its object-monitor cache slot is cleared when UseObjectMonitorTable is enabled
10459 // obj: the object to be locked
10460 // reg_rax: rax
10461 // tmp: a temporary register
10462 void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
10463 Register thread = r15_thread;
10464
10465 assert(reg_rax == rax, "");
10466 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
10467
10468 Label push;
10469 const Register top = tmp;
10470
10471 // Preload the markWord. It is important that this is the first
10472 // instruction emitted as it is part of C1's null check semantics.
10473 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10474
10475 if (UseObjectMonitorTable) {
10476 // Clear cache in case fast locking succeeds or we need to take the slow-path.
10477 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
10478 }
10479
10480 if (DiagnoseSyncOnValueBasedClasses != 0) {
10481 load_klass(tmp, obj, rscratch1);
10482 testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
10483 jcc(Assembler::notZero, slow);
10484 }
10485
10486 // Load top.
10487 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10488
10489 // Check if the lock-stack is full.
10490 cmpl(top, LockStack::end_offset());
10491 jcc(Assembler::greaterEqual, slow);
10492
10493 // Check for recursion.
10494 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10495 jcc(Assembler::equal, push);
10496
10497 // Check header for monitor (0b10).
10498 testptr(reg_rax, markWord::monitor_value);
10499 jcc(Assembler::notZero, slow);
10500
10501 // Try to lock. Transition lock bits 0b01 => 0b00
10502 movptr(tmp, reg_rax);
10503 andptr(tmp, ~(int32_t)markWord::unlocked_value);
10504 orptr(reg_rax, markWord::unlocked_value);
10505 // Mask inline_type bit such that we go to the slow path if object is an inline type
10506 andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10507
10508 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10509 jcc(Assembler::notEqual, slow);
10510
10511 // Restore top, CAS clobbers register.
10512 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10513
10514 bind(push);
10515 // After successful lock, push object on lock-stack.
10516 movptr(Address(thread, top), obj);
10517 incrementl(top, oopSize);
10518 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10519 }
10520
10521 // Implements fast-unlocking.
10522 //
10523 // obj: the object to be unlocked
10524 // reg_rax: rax
10525 // tmp: a temporary register
10527 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
10528 Register thread = r15_thread;
10529
10530 assert(reg_rax == rax, "");
10531 assert_different_registers(obj, reg_rax, thread, tmp);
10532
10533 Label unlocked, push_and_slow;
10534 const Register top = tmp;
10535
10536 // Check if obj is top of lock-stack.
10537 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10538 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10539 jcc(Assembler::notEqual, slow);
10540
10541 // Pop lock-stack.
10542 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10543 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10544
10545 // Check if recursive.
10546 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10547 jcc(Assembler::equal, unlocked);
10548
10549 // Not recursive. Check header for monitor (0b10).
10550 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10551 testptr(reg_rax, markWord::monitor_value);
10552 jcc(Assembler::notZero, push_and_slow);
10553
10554 #ifdef ASSERT
10555 // Check header not unlocked (0b01).
10556 Label not_unlocked;
10557 testptr(reg_rax, markWord::unlocked_value);
10558 jcc(Assembler::zero, not_unlocked);
10559 stop("fast_unlock already unlocked");
10560 bind(not_unlocked);
10561 #endif
10562
10563 // Try to unlock. Transition lock bits 0b00 => 0b01
10564 movptr(tmp, reg_rax);
10565 orptr(tmp, markWord::unlocked_value);
10566 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10567 jcc(Assembler::equal, unlocked);
10568
10569 bind(push_and_slow);
10570 // Restore lock-stack and handle the unlock in runtime.
10571 #ifdef ASSERT
10572 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10573 movptr(Address(thread, top), obj);
10574 #endif
10575 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10576 jmp(slow);
10577
10578 bind(unlocked);
10579 }
10580
10581 // Saves legacy GPRs state on the stack.
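// Note: slot 11 * wordSize is left unused (this is where rsp would sit in a
// pusha-style layout); restore_legacy_gprs() below reads back the same offsets.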
10582 void MacroAssembler::save_legacy_gprs() {
10583 subq(rsp, 16 * wordSize);
10584 movq(Address(rsp, 15 * wordSize), rax);
10585 movq(Address(rsp, 14 * wordSize), rcx);
10586 movq(Address(rsp, 13 * wordSize), rdx);
10587 movq(Address(rsp, 12 * wordSize), rbx);
10588 movq(Address(rsp, 10 * wordSize), rbp);
10589 movq(Address(rsp, 9 * wordSize), rsi);
10590 movq(Address(rsp, 8 * wordSize), rdi);
10591 movq(Address(rsp, 7 * wordSize), r8);
10592 movq(Address(rsp, 6 * wordSize), r9);
10593 movq(Address(rsp, 5 * wordSize), r10);
10594 movq(Address(rsp, 4 * wordSize), r11);
10595 movq(Address(rsp, 3 * wordSize), r12);
10596 movq(Address(rsp, 2 * wordSize), r13);
10597 movq(Address(rsp, wordSize), r14);
10598 movq(Address(rsp, 0), r15);
10599 }
10600
10601 // Restores legacy GPRs state from the stack.
10602 void MacroAssembler::restore_legacy_gprs() {
10603 movq(r15, Address(rsp, 0));
10604 movq(r14, Address(rsp, wordSize));
10605 movq(r13, Address(rsp, 2 * wordSize));
10606 movq(r12, Address(rsp, 3 * wordSize));
10607 movq(r11, Address(rsp, 4 * wordSize));
10608 movq(r10, Address(rsp, 5 * wordSize));
10609 movq(r9, Address(rsp, 6 * wordSize));
10610 movq(r8, Address(rsp, 7 * wordSize));
10611 movq(rdi, Address(rsp, 8 * wordSize));
10612 movq(rsi, Address(rsp, 9 * wordSize));
10613 movq(rbp, Address(rsp, 10 * wordSize));
10614 movq(rbx, Address(rsp, 12 * wordSize));
10615 movq(rdx, Address(rsp, 13 * wordSize));
10616 movq(rcx, Address(rsp, 14 * wordSize));
10617 movq(rax, Address(rsp, 15 * wordSize));
10618 addq(rsp, 16 * wordSize);
10619 }
10620
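// Materializes `comparison` as a 0/1 value in dst. With APX, ESETZUCC already
// zero-extends; otherwise SETcc only writes the low byte, so MOVZX clears the rest.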
10621 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
10622 if (VM_Version::supports_apx_f()) {
10623 esetzucc(comparison, dst);
10624 } else {
10625 setb(comparison, dst);
10626 movzbl(dst, dst);
10627 }
10628 }