/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/integerCast.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//    1a)  00___x Unconditional branch (immediate)
//    1b)  01___0 Compare & branch (immediate)
//    1c)  01___1 Test & branch (immediate)
//    1d)  10___0 Conditional branch (immediate)
//    other       Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//    2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//    2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//         strictly should be 64 bit non-FP/SIMD i.e.
//         0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//    3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//    3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//         strictly should be 64 bit movz #imm16<<0
//         110___10100 (i.e. requires insn[31:21] == 11010010100)
//
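// Worked example (illustrative): `b <target>` encodes 0b000101 in bits
// 31:26 with imm26 in bits 25:0, so insn[30:25] is 0b00101x where 'x'
// is the top bit of imm26 -- hence the two cases 0b001010/0b001011 in
// the dispatch switch below both select unconditionalBranch. `bl`
// differs only in bit 31, so it lands in the same pair of cases.
//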

static uint32_t insn_at(address insn_addr, int n) {
  return ((uint32_t*)insn_addr)[n];
}

template<typename T>
class RelocActions : public AllStatic {

public:

  static int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;
    uint32_t insn = insn_at(insn_addr, 0);

    uint32_t dispatch = Instruction_aarch64::extract(insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = T::unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = T::conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = T::testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = T::loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(insn_addr, 1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = T::adrp(insn_addr, target, T::adrpMem);
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = T::adrp(insn_addr, target, T::adrpAdd);
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = T::adrp(insn_addr, target, T::adrpMovk);
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = T::adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = T::immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    T::verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public AllStatic {
public:
  static int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  static int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  static int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  static int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  static int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  template<typename U>
  static int adrp(address insn_addr, address &target, U inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = inner(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
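  // Worked example (illustrative): patching the movz/movk/movk sequence
  // below to target == 0x00007fb412345678 rewrites the three 16-bit
  // immediate fields (bits 20:5 of each instruction) to
  //   movz Rx, #0x5678            // bits 15:0
  //   movk Rx, #0x1234, lsl #16   // bits 31:16
  //   movk Rx, #0x7fb4, lsl #32   // bits 47:32
  // Three halfwords suffice for the 48-bit addresses movptr() assumes.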
  static int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offset LDR or a simple ADD, set byte_offset to that offset and
// return true. If the second instruction is an LDR, the offset is
// scaled by the access size.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}
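
// Worked example (illustrative): for the pair `adrp x0, sym; ldr x1, [x0, #16]`
// the LDR's size field (bits 31:30) is 3 (an 8-byte access) and its imm12
// field (bits 21:10) holds 2, so byte_offset = 2 << 3 = 16.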

class AArch64Decoder : public AllStatic {
public:

  static int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  static int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  static int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  static int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  static int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    uint32_t insn = insn_at(insn_addr, 0);
    intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
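  // Worked example (illustrative): an adrp at pc 0x00007fb400001234
  // whose decoded immediate is +2 pages gives
  //   target_page = (0x00007fb400001234 + (2 << 12)) & ~0xfffULL
  //               = 0x00007fb400003000
  // after which the inner instruction (ldr/str, add, or movk) supplies
  // the low 12 bits (or, for movk, the high 16 bits) of the target.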
  template<typename U>
  static int adrp(address insn_addr, address &target, U inner) {
    uint32_t insn = insn_at(insn_addr, 0);
    assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    target = address(target_page);
    precond(inner != nullptr);
    inner(insn_addr, target);
    return 2;
  }
  static int adrpMem(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  static int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    return 3;
  }
  static void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
  return RelocActions<Patcher>::run(insn_addr, target);
}

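// Worked example (illustrative) for the narrow case in patch_oop()
// below: a narrow oop 0x12345678 is materialized as
// `movz Rx, #0x1234, lsl #16` (note the hw=01 opcode mask
// 0b11010010101) followed by `movk Rx, #0x5678`, so patching writes
// n >> 16 into the first instruction and n & 0xffff into the second.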
int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
  ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp and sp of the last Java frame have to be
// recorded in the (thread-local) JavaThread object. When leaving C land,
// the last Java fp has to be reset to 0. This is required to allow proper
// stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // We must set sp last.
  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (AOTCodeCache::is_on_for_dump()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
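// Illustrative summary of the thresholds above: a direct b/bl reaches
// +/-128M, so a code cache no larger than 128M never needs far
// branches. Above 240M a far branch is always assumed. In the
// 128M..240M range only the non-nmethod segment (stubs and adapters),
// which sits between the two nmethod segments, is guaranteed to be
// within range of any branch site.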

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  Label* return_pc,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if (!TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, return_pc != nullptr ? *return_pc : l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, nullptr, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (AOTCodeCache::is_on_for_dump()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}
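// Illustrative layout of the stub emitted above (offsets within the stub):
//   +0x0: ldr  rscratch1, +0x8   // load-register-literal of the data word
//   +0x4: br   rscratch1
//   +0x8: .quad dest             // NativeCallTrampolineStub::data_offset
// Because the branch target is loaded from the data word, the trampoline
// can be retargeted later by patching that word alone, without touching
// the code.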

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  if (codestub_branch_needs_far_jump()) {
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    b(pc());
  }
}

int MacroAssembler::static_call_stub_size() {
  // During an AOT production run, AOT-compiled and JIT-compiled code are
  // used at the same time. We need this size to be the same for both
  // types of code.
  if (!codestub_branch_needs_far_jump() && !AOTCodeCache::is_on_for_use()) {
    // isb; movz; movk; movk; b
    return 5 * NativeInstruction::instruction_size;
  }
  // isb; movz; movk; movk; movz; movk; movk; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * (7 + extra_instructions);
  } else {
    return NativeInstruction::instruction_size * (5 + extra_instructions);
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is
  // placed before the inline cache check, so we don't have to execute any nop instructions when
  // dispatching through the UEP, yet we can ensure that the VEP is aligned appropriately. That's
  // why we align before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, receiver);
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
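// Worked example (illustrative): suppose end_alignment == 16, the check
// sequence above is 5 instructions (20 bytes), and offset() is 24 when
// ic_check() is entered. align(16, 24 + 20) emits one nop, the UEP lands
// at offset 28, and the VEP lands at 48 -- 16-byte aligned, with no nops
// on the path taken when dispatching through the UEP itself.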

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, nullptr, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result_oop(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_oop_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_oop_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_metadata(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_metadata_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_metadata_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Emit nops so that the code at absolute buffer offset `target` (in
// bytes) will be aligned according to `modulus`.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}
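// Worked example (illustrative): with offset() == 24, align(16, 44) sets
// delta = 20 and emits nops while (offset() + 20) % 16 != 0 -- here just
// one -- after which offset() == 28 and the position 20 bytes ahead (48)
// is 16-byte aligned.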

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
    - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}
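// Worked example (illustrative): for a constant vtable_index of 3, and
// assuming a vtable start offset of 0x1c0 for concreteness, the else
// branch above folds everything into a single load:
//   ldr method_result, [recv_klass, #0x1c0 + method_offset + 3*8]
// i.e. the Method* is read straight out of the receiver klass's vtable.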
1351
1352 void MacroAssembler::check_klass_subtype(Register sub_klass,
1353 Register super_klass,
1354 Register temp_reg,
1355 Label& L_success) {
1356 Label L_failure;
1357 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
1358 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
1359 bind(L_failure);
1360 }
1361
1362
1363 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
1364 Register super_klass,
1365 Register temp_reg,
1366 Label* L_success,
1367 Label* L_failure,
1368 Label* L_slow_path,
1369 Register super_check_offset) {
1370 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
1371 bool must_load_sco = ! super_check_offset->is_valid();
1372 if (must_load_sco) {
1373 assert(temp_reg != noreg, "supply either a temp or a register offset");
1374 }
1375
1376 Label L_fallthrough;
1377 int label_nulls = 0;
1378 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1379 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1380 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
1381 assert(label_nulls <= 1, "at most one null in the batch");
1382
1383 int sco_offset = in_bytes(Klass::super_check_offset_offset());
1384 Address super_check_offset_addr(super_klass, sco_offset);
1385
1386 // Hacked jmp, which may only be used just before L_fallthrough.
1387 #define final_jmp(label) \
1388 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
1389 else b(label) /*omit semi*/
1390
1391 // If the pointers are equal, we are done (e.g., String[] elements).
1392 // This self-check enables sharing of secondary supertype arrays among
1393 // non-primary types such as array-of-interface. Otherwise, each such
1394 // type would need its own customized SSA.
1395 // We move this check to the front of the fast path because many
1396 // type checks are in fact trivially successful in this manner,
1397 // so we get a nicely predicted branch right at the start of the check.
1398 cmp(sub_klass, super_klass);
1399 br(Assembler::EQ, *L_success);
1400
1401 // Check the supertype display:
1402 if (must_load_sco) {
1403 ldrw(temp_reg, super_check_offset_addr);
1404 super_check_offset = temp_reg;
1405 }
1406
1407 Address super_check_addr(sub_klass, super_check_offset);
1408 ldr(rscratch1, super_check_addr);
1409 cmp(super_klass, rscratch1); // load displayed supertype
1410 br(Assembler::EQ, *L_success);
1411
1412 // This check has worked decisively for primary supers.
1413 // Secondary supers are sought in the super_cache ('super_cache_addr').
1414 // (Secondary supers are interfaces and very deeply nested subtypes.)
1415 // This works in the same check above because of a tricky aliasing
1416 // between the super_cache and the primary super display elements.
1417 // (The 'super_check_addr' can address either, as the case requires.)
1418 // Note that the cache is updated below if it does not help us find
1419 // what we need immediately.
1420 // So if it was a primary super, we can just fail immediately.
1421 // Otherwise, it's the slow path for us (no success at this point).
1422
1423 sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
1424 if (L_failure == &L_fallthrough) {
1425 cbz(rscratch1, *L_slow_path);
1426 } else {
1427 cbnz(rscratch1, *L_failure);
1428 final_jmp(*L_slow_path);
1429 }
1430
1431 bind(L_fallthrough);
1432
1433 #undef final_jmp
1434 }
1435
1436 // These two are taken from x86, but they look generally useful
1437
1438 // scans count pointer sized words at [addr] for occurrence of value,
1439 // generic
1440 void MacroAssembler::repne_scan(Register addr, Register value, Register count,
1441 Register scratch) {
1442 Label Lloop, Lexit;
1443 cbz(count, Lexit);
1444 bind(Lloop);
1445 ldr(scratch, post(addr, wordSize));
1446 cmp(value, scratch);
1447 br(EQ, Lexit);
1448 sub(count, count, 1);
1449 cbnz(count, Lloop);
1450 bind(Lexit);
1451 }
1452
1453 // scans count 4 byte words at [addr] for occurrence of value,
1454 // generic
1455 void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
1456 Register scratch) {
1457 Label Lloop, Lexit;
1458 cbz(count, Lexit);
1459 bind(Lloop);
1460 ldrw(scratch, post(addr, wordSize));
1461 cmpw(value, scratch);
1462 br(EQ, Lexit);
1463 sub(count, count, 1);
1464 cbnz(count, Lloop);
1465 bind(Lexit);
1466 }
1467
1468 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
1469 Register super_klass,
1470 Register temp_reg,
1471 Register temp2_reg,
1472 Label* L_success,
1473 Label* L_failure,
1474 bool set_cond_codes) {
1475 // NB! Callers may assume that, when temp2_reg is a valid register,
1476 // this code sets it to a nonzero value.
1477
1478 assert_different_registers(sub_klass, super_klass, temp_reg);
1479 if (temp2_reg != noreg)
1480 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1481 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1482
1483 Label L_fallthrough;
1484 int label_nulls = 0;
1485 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1486 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1487 assert(label_nulls <= 1, "at most one null in the batch");
1488
1489 // a couple of useful fields in sub_klass:
1490 int ss_offset = in_bytes(Klass::secondary_supers_offset());
1491 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1492 Address secondary_supers_addr(sub_klass, ss_offset);
1493 Address super_cache_addr( sub_klass, sc_offset);
1494
1495 BLOCK_COMMENT("check_klass_subtype_slow_path");
1496
1497 // Do a linear scan of the secondary super-klass chain.
1498 // This code is rarely used, so simplicity is a virtue here.
  // The scan below uses fixed registers (r0, r2, r5), which we must spill.
1500 // Don't worry too much about pre-existing connections with the input regs.
1501
1502 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by the scan count in r2
1504
1505 RegSet pushed_registers;
1506 if (!IS_A_TEMP(r2)) pushed_registers += r2;
1507 if (!IS_A_TEMP(r5)) pushed_registers += r5;
1508
1509 if (super_klass != r0) {
1510 if (!IS_A_TEMP(r0)) pushed_registers += r0;
1511 }
1512
1513 push(pushed_registers, sp);
1514
1515 // Get super_klass value into r0 (even if it was in r5 or r2).
1516 if (super_klass != r0) {
1517 mov(r0, super_klass);
1518 }
1519
1520 #ifndef PRODUCT
1521 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1522 #endif //PRODUCT
1523
1524 // We will consult the secondary-super array.
1525 ldr(r5, secondary_supers_addr);
1526 // Load the array length.
1527 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1528 // Skip to start of data.
1529 add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1530
1531 cmp(sp, zr); // Clear Z flag; SP is never zero
1532 // Scan R2 words at [R5] for an occurrence of R0.
1533 // Set NZ/Z based on last compare.
1534 repne_scan(r5, r0, r2, rscratch1);
1535
1536 // Unspill the temp. registers:
1537 pop(pushed_registers, sp);
1538
1539 br(Assembler::NE, *L_failure);
1540
1541 // Success. Cache the super we found and proceed in triumph.
1542
1543 if (UseSecondarySupersCache) {
1544 str(super_klass, super_cache_addr);
1545 }
1546
1547 if (L_success != &L_fallthrough) {
1548 b(*L_success);
1549 }
1550
1551 #undef IS_A_TEMP
1552
1553 bind(L_fallthrough);
1554 }
1555
// If Register r is invalid, take the next register from
// available_regs and add it to regs_to_push.
1558 Register MacroAssembler::allocate_if_noreg(Register r,
1559 RegSetIterator<Register> &available_regs,
                                           RegSet &regs_to_push) {
1561 if (!r->is_valid()) {
1562 r = *available_regs++;
1563 regs_to_push += r;
1564 }
1565 return r;
1566 }
1567
// check_klass_subtype_slow_path_table() looks for super_klass in the
// hash table belonging to sub_klass, branching to L_success or
// L_failure as appropriate. This is essentially a shim which
// allocates registers as necessary then calls
// lookup_secondary_supers_table() to do the work. Any of the temp
// regs may be noreg, in which case this logic chooses some registers,
// pushing and popping them around the lookup.
1575 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1576 Register super_klass,
1577 Register temp_reg,
1578 Register temp2_reg,
1579 Register temp3_reg,
1580 Register result_reg,
1581 FloatRegister vtemp,
1582 Label* L_success,
1583 Label* L_failure,
1584 bool set_cond_codes) {
1585 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1586
1587 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1588
1589 Label L_fallthrough;
1590 int label_nulls = 0;
1591 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1592 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1593 assert(label_nulls <= 1, "at most one null in the batch");
1594
1595 BLOCK_COMMENT("check_klass_subtype_slow_path");
1596
1597 RegSetIterator<Register> available_regs
1598 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1599
1600 RegSet pushed_regs;
1601
1602 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1603 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1604 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1605 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1606
1607 push(pushed_regs, sp);
1608
1609 lookup_secondary_supers_table_var(sub_klass,
1610 super_klass,
1611 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1612 nullptr);
1613 cmp(result_reg, zr);
1614
1615 // Unspill the temp. registers:
1616 pop(pushed_regs, sp);
1617
1618 // NB! Callers may assume that, when set_cond_codes is true, this
1619 // code sets temp2_reg to a nonzero value.
1620 if (set_cond_codes) {
1621 mov(temp2_reg, 1);
1622 }
1623
1624 br(Assembler::NE, *L_failure);
1625
1626 if (L_success != &L_fallthrough) {
1627 b(*L_success);
1628 }
1629
1630 bind(L_fallthrough);
1631 }
1632
1633 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1634 Register super_klass,
1635 Register temp_reg,
1636 Register temp2_reg,
1637 Label* L_success,
1638 Label* L_failure,
1639 bool set_cond_codes) {
1640 if (UseSecondarySupersTable) {
1641 check_klass_subtype_slow_path_table
1642 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1643 /*vtemp*/fnoreg,
1644 L_success, L_failure, set_cond_codes);
1645 } else {
1646 check_klass_subtype_slow_path_linear
1647 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1648 }
1649 }
1650
1651
1652 // Ensure that the inline code and the stub are using the same registers.
1653 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
1654 do { \
1655 assert(r_super_klass == r0 && \
1656 r_array_base == r1 && \
1657 r_array_length == r2 && \
1658 (r_array_index == r3 || r_array_index == noreg) && \
1659 (r_sub_klass == r4 || r_sub_klass == noreg) && \
1660 (r_bitmap == rscratch2 || r_bitmap == noreg) && \
1661 (result == r5 || result == noreg), "registers must match aarch64.ad"); \
1662 } while(0)
1663
1664 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
1665 Register r_super_klass,
1666 Register temp1,
1667 Register temp2,
1668 Register temp3,
1669 FloatRegister vtemp,
1670 Register result,
1671 u1 super_klass_slot,
1672 bool stub_is_near) {
1673 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1674
1675 Label L_fallthrough;
1676
1677 BLOCK_COMMENT("lookup_secondary_supers_table {");
1678
1679 const Register
1680 r_array_base = temp1, // r1
1681 r_array_length = temp2, // r2
1682 r_array_index = temp3, // r3
1683 r_bitmap = rscratch2;
1684
1685 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1686
1687 u1 bit = super_klass_slot;
1688
1689 // Make sure that result is nonzero if the TBZ below misses.
1690 mov(result, 1);
1691
1692 // We're going to need the bitmap in a vector reg and in a core reg,
1693 // so load both now.
1694 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1695 if (bit != 0) {
1696 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1697 }
1698 // First check the bitmap to see if super_klass might be present. If
1699 // the bit is zero, we are certain that super_klass is not one of
1700 // the secondary supers.
1701 tbz(r_bitmap, bit, L_fallthrough);
1702
1703 // Get the first array index that can contain super_klass into r_array_index.
1704 if (bit != 0) {
1705 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit);
1706 cnt(vtemp, T8B, vtemp);
1707 addv(vtemp, T8B, vtemp);
1708 fmovd(r_array_index, vtemp);
1709 } else {
1710 mov(r_array_index, (u1)1);
1711 }
1712 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
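  // For illustration: with bit == 5, the shld above shifts bits [0..5]
  // of the bitmap into the top of the doubleword, so the cnt/addv
  // popcount counts the set bits at or below bit 5. Since bit 5 itself
  // is known to be set, that count equals (array index + 1), matching
  // the off-by-one convention noted above.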
1713
1714 // We will consult the secondary-super array.
1715 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1716
1717 // The value i in r_array_index is >= 1, so even though r_array_base
1718 // points to the length, we don't need to adjust it to point to the
1719 // data.
1720 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1721 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1722
1723 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1724 eor(result, result, r_super_klass);
1725 cbz(result, L_fallthrough); // Found a match
1726
1727 // Is there another entry to check? Consult the bitmap.
1728 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough);
1729
1730 // Linear probe.
1731 if (bit != 0) {
1732 ror(r_bitmap, r_bitmap, bit);
1733 }
1734
1735 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1736 // The next slot to be inspected, by the stub we're about to call,
1737 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1738 // have been checked.
1739 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1740 if (stub_is_near) {
1741 bl(stub);
1742 } else {
1743 address call = trampoline_call(stub);
1744 if (call == nullptr) {
1745 return false; // trampoline allocation failed
1746 }
1747 }
1748
1749 BLOCK_COMMENT("} lookup_secondary_supers_table");
1750
1751 bind(L_fallthrough);
1752
1753 if (VerifySecondarySupers) {
1754 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1755 temp1, temp2, result); // r1, r2, r5
1756 }
1757 return true;
1758 }
1759
1760 // At runtime, return 0 in result if r_super_klass is a superclass of
1761 // r_sub_klass, otherwise return nonzero. Use this version of
1762 // lookup_secondary_supers_table() if you don't know ahead of time
1763 // which superclass will be searched for. Used by interpreter and
1764 // runtime stubs. It is larger and has somewhat greater latency than
1765 // the version above, which takes a constant super_klass_slot.
1766 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
1767 Register r_super_klass,
1768 Register temp1,
1769 Register temp2,
1770 Register temp3,
1771 FloatRegister vtemp,
1772 Register result,
1773 Label *L_success) {
1774 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1775
1776 Label L_fallthrough;
1777
1778 BLOCK_COMMENT("lookup_secondary_supers_table {");
1779
1780 const Register
1781 r_array_index = temp3,
1782 slot = rscratch1,
1783 r_bitmap = rscratch2;
1784
1785 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
1786
1787 // Make sure that result is nonzero if the test below misses.
1788 mov(result, 1);
1789
1790 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1791
1792 // First check the bitmap to see if super_klass might be present. If
1793 // the bit is zero, we are certain that super_klass is not one of
1794 // the secondary supers.
1795
1796 // This next instruction is equivalent to:
1797 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1798 // sub(temp2, tmp_reg, slot);
1799 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1800 lslv(temp2, r_bitmap, temp2);
1801 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough);
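  // For example, with slot == 5: temp2 = 5 ^ 63 = 58 (== 63 - 5, since
  // slot is always in [0, 63]), so the lslv moves bit 5 of the bitmap
  // into bit 63, which the tbz above then tests.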
1802
1803 bool must_save_v0 = (vtemp == fnoreg);
1804 if (must_save_v0) {
1805 // temp1 and result are free, so use them to preserve vtemp
1806 vtemp = v0;
1807 mov(temp1, vtemp, D, 0);
1808 mov(result, vtemp, D, 1);
1809 }
1810
1811 // Get the first array index that can contain super_klass into r_array_index.
1812 mov(vtemp, D, 0, temp2);
1813 cnt(vtemp, T8B, vtemp);
1814 addv(vtemp, T8B, vtemp);
1815 mov(r_array_index, vtemp, D, 0);
1816
1817 if (must_save_v0) {
    mov(vtemp, D, 0, temp1);
1819 mov(vtemp, D, 1, result);
1820 }
1821
1822 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1823
1824 const Register
1825 r_array_base = temp1,
1826 r_array_length = temp2;
1827
1828 // The value i in r_array_index is >= 1, so even though r_array_base
1829 // points to the length, we don't need to adjust it to point to the
1830 // data.
1831 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1832 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1833
1834 // We will consult the secondary-super array.
1835 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1836
1837 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1838 eor(result, result, r_super_klass);
1839 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match
1840
1841 // Is there another entry to check? Consult the bitmap.
1842 rorv(r_bitmap, r_bitmap, slot);
1843 // rol(r_bitmap, r_bitmap, 1);
1844 tbz(r_bitmap, 1, L_fallthrough);
1845
1846 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1847 // The next slot to be inspected, by the logic we're about to call,
1848 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1849 // have been checked.
1850 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index,
1851 r_bitmap, r_array_length, result, /*is_stub*/false);
1852
1853 BLOCK_COMMENT("} lookup_secondary_supers_table");
1854
1855 bind(L_fallthrough);
1856
1857 if (VerifySecondarySupers) {
1858 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1859 temp1, temp2, result); // r1, r2, r5
1860 }
1861
1862 if (L_success) {
1863 cbz(result, *L_success);
1864 }
1865 }
1866
1867 // Called by code generated by check_klass_subtype_slow_path
1868 // above. This is called when there is a collision in the hashed
1869 // lookup in the secondary supers array.
1870 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1871 Register r_array_base,
1872 Register r_array_index,
1873 Register r_bitmap,
1874 Register temp1,
1875 Register result,
1876 bool is_stub) {
1877 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1878
1879 const Register
1880 r_array_length = temp1,
1881 r_sub_klass = noreg; // unused
1882
1883 if (is_stub) {
1884 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1885 }
1886
1887 Label L_fallthrough, L_huge;
1888
1889 // Load the array length.
1890 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1891 // And adjust the array base to point to the data.
1892 // NB! Effectively increments current slot index by 1.
1893 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1894 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1895
1896 // The bitmap is full to bursting.
1897 // Implicit invariant: BITMAP_FULL implies (length > 0)
1898 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1899 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2));
1900 br(GT, L_huge);
1901
1902 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1903 // current slot (at secondary_supers[r_array_index]) has not yet
1904 // been inspected, and r_array_index may be out of bounds if we
1905 // wrapped around the end of the array.
1906
1907 { // This is conventional linear probing, but instead of terminating
1908 // when a null entry is found in the table, we maintain a bitmap
1909 // in which a 0 indicates missing entries.
1910 // As long as the bitmap is not completely full,
1911 // array_length == popcount(bitmap). The array_length check above
1912 // guarantees there are 0s in the bitmap, so the loop eventually
1913 // terminates.
1914 Label L_loop;
1915 bind(L_loop);
1916
1917 // Check for wraparound.
1918 cmp(r_array_index, r_array_length);
1919 csel(r_array_index, zr, r_array_index, GE);
1920
1921 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1922 eor(result, rscratch1, r_super_klass);
1923 cbz(result, L_fallthrough);
1924
1925 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1926
1927 ror(r_bitmap, r_bitmap, 1);
1928 add(r_array_index, r_array_index, 1);
1929 b(L_loop);
1930 }
1931
1932 { // Degenerate case: more than 64 secondary supers.
1933 // FIXME: We could do something smarter here, maybe a vectorized
1934 // comparison or a binary search, but is that worth any added
1935 // complexity?
1936 bind(L_huge);
1937 cmp(sp, zr); // Clear Z flag; SP is never zero
1938 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1939 cset(result, NE); // result == 0 iff we got a match.
1940 }
1941
1942 bind(L_fallthrough);
1943 }
1944
1945 // Make sure that the hashed lookup and a linear scan agree.
1946 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1947 Register r_super_klass,
1948 Register temp1,
1949 Register temp2,
1950 Register result) {
1951 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1952
1953 const Register
1954 r_array_base = temp1,
1955 r_array_length = temp2;
1956
1957 BLOCK_COMMENT("verify_secondary_supers_table {");
1958
1959 // We will consult the secondary-super array.
1960 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1961
1962 // Load the array length.
1963 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1964 // And adjust the array base to point to the data.
1965 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1966
1967 cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan r_array_length words at [r_array_base] for an occurrence of
  // r_super_klass. Set NZ/Z based on last compare.
1970 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
1971 // rscratch1 == 0 iff we got a match.
1972 cset(rscratch1, NE);
1973
1974 Label passed;
1975 cmp(result, zr);
1976 cset(result, NE); // normalize result to 0/1 for comparison
1977
1978 cmp(rscratch1, result);
1979 br(EQ, passed);
1980 {
1981 mov(r0, r_super_klass); // r0 <- r0
1982 mov(r1, r_sub_klass); // r1 <- r4
1983 mov(r2, /*expected*/rscratch1); // r2 <- r8
1984 mov(r3, result); // r3 <- r5
1985 mov(r4, (address)("mismatch")); // r4 <- const
1986 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
1987 should_not_reach_here();
1988 }
1989 bind(passed);
1990
1991 BLOCK_COMMENT("} verify_secondary_supers_table");
1992 }
1993
1994 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
1995 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
1996 assert_different_registers(klass, rthread, scratch);
1997
1998 Label L_fallthrough, L_tmp;
1999 if (L_fast_path == nullptr) {
2000 L_fast_path = &L_fallthrough;
2001 } else if (L_slow_path == nullptr) {
2002 L_slow_path = &L_fallthrough;
2003 }
2004 // Fast path check: class is fully initialized
2005 lea(scratch, Address(klass, InstanceKlass::init_state_offset()));
2006 ldarb(scratch, scratch);
2007 cmp(scratch, InstanceKlass::fully_initialized);
2008 br(Assembler::EQ, *L_fast_path);
2009
2010 // Fast path check: current thread is initializer thread
2011 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
2012 cmp(rthread, scratch);
2013
2014 if (L_slow_path == &L_fallthrough) {
2015 br(Assembler::EQ, *L_fast_path);
2016 bind(*L_slow_path);
2017 } else if (L_fast_path == &L_fallthrough) {
2018 br(Assembler::NE, *L_slow_path);
2019 bind(*L_fast_path);
2020 } else {
2021 Unimplemented();
2022 }
2023 }
2024
2025 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
2026 if (!VerifyOops) return;
2027
2028 // Pass register number to verify_oop_subroutine
2029 const char* b = nullptr;
2030 {
2031 ResourceMark rm;
2032 stringStream ss;
2033 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
2034 b = code_string(ss.as_string());
2035 }
2036 BLOCK_COMMENT("verify_oop {");
2037
2038 strip_return_address(); // This might happen within a stack frame.
2039 protect_return_address();
2040 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2041 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2042
2043 mov(r0, reg);
2044 movptr(rscratch1, (uintptr_t)(address)b);
2045
2046 // call indirectly to solve generation ordering problem
2047 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2048 ldr(rscratch2, Address(rscratch2));
2049 blr(rscratch2);
2050
2051 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2052 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2053 authenticate_return_address();
2054
2055 BLOCK_COMMENT("} verify_oop");
2056 }
2057
2058 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
2059 if (!VerifyOops) return;
2060
2061 const char* b = nullptr;
2062 {
2063 ResourceMark rm;
2064 stringStream ss;
2065 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
2066 b = code_string(ss.as_string());
2067 }
2068 BLOCK_COMMENT("verify_oop_addr {");
2069
2070 strip_return_address(); // This might happen within a stack frame.
2071 protect_return_address();
2072 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2073 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2074
2075 // addr may contain sp so we will have to adjust it based on the
2076 // pushes that we just did.
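  // The two stp instructions above pushed 4 words, so an sp-relative
  // address must be read back with a 4 * wordSize correction.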
2077 if (addr.uses(sp)) {
2078 lea(r0, addr);
2079 ldr(r0, Address(r0, 4 * wordSize));
2080 } else {
2081 ldr(r0, addr);
2082 }
2083 movptr(rscratch1, (uintptr_t)(address)b);
2084
2085 // call indirectly to solve generation ordering problem
2086 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2087 ldr(rscratch2, Address(rscratch2));
2088 blr(rscratch2);
2089
2090 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2091 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2092 authenticate_return_address();
2093
2094 BLOCK_COMMENT("} verify_oop_addr");
2095 }
2096
2097 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
2098 int extra_slot_offset) {
2099 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
2100 int stackElementSize = Interpreter::stackElementSize;
2101 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
2102 #ifdef ASSERT
2103 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
2104 assert(offset1 - offset == stackElementSize, "correct arithmetic");
2105 #endif
2106 if (arg_slot.is_constant()) {
2107 return Address(esp, arg_slot.as_constant() * stackElementSize
2108 + offset);
2109 } else {
2110 add(rscratch1, esp, arg_slot.as_register(),
2111 ext::uxtx, exact_log2(stackElementSize));
2112 return Address(rscratch1, offset);
2113 }
2114 }
2115
2116 // Handle the receiver type profile update given the "recv" klass.
2117 //
2118 // Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
2119 // If there are no matching or claimable receiver entries in RD, updates
2120 // the polymorphic counter.
2121 //
// This code is expected to run in either the interpreter or JIT-ed code, without
2123 // extra synchronization. For safety, receiver cells are claimed atomically, which
2124 // avoids grossly misrepresenting the profiles under concurrent updates. For speed,
2125 // counter updates are not atomic.
2126 //
2127 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
2128 assert_different_registers(recv, mdp, rscratch1, rscratch2);
2129
2130 int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
2131 int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
2132 int poly_count_offset = in_bytes(CounterData::count_offset());
2133 int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
2134 int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
2135
2136 // Adjust for MDP offsets.
2137 base_receiver_offset += mdp_offset;
2138 end_receiver_offset += mdp_offset;
2139 poly_count_offset += mdp_offset;
2140
2141 #ifdef ASSERT
2142 // We are about to walk the MDO slots without asking for offsets.
2143 // Check that our math hits all the right spots.
2144 for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
2145 int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
2146 int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
2147 int offset = base_receiver_offset + receiver_step*c;
2148 int count_offset = offset + receiver_to_count_step;
2149 assert(offset == real_recv_offset, "receiver slot math");
2150 assert(count_offset == real_count_offset, "receiver count math");
2151 }
2152 int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
2153 assert(poly_count_offset == real_poly_count_offset, "poly counter math");
2154 #endif
2155
2156 // Corner case: no profile table. Increment poly counter and exit.
2157 if (ReceiverTypeData::row_limit() == 0) {
2158 increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
2159 return;
2160 }
2161
2162 Register offset = rscratch2;
2163
2164 Label L_loop_search_receiver, L_loop_search_empty;
2165 Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
2166
2167 // The code here recognizes three major cases:
2168 // A. Fastest: receiver found in the table
2169 // B. Fast: no receiver in the table, and the table is full
2170 // C. Slow: no receiver in the table, free slots in the table
2171 //
  // Case A performance is most important, as perfectly-behaved code would end up
  // there, especially with larger TypeProfileWidth. Case B performance is
  // important as well; this is where the bulk of code lands for normally megamorphic
  // cases. Case C performance is not essential: its job is to deal with installation
  // races, so we optimize for code density instead. Case C needs to make sure that
  // receiver rows are only claimed once. This guarantees we never overwrite a row for
  // another receiver and never duplicate receivers in the list, keeping the profile
  // type-accurate.
  //
  // It is very tempting to handle these cases in a single loop and claim the first
  // free slot without checking the rest of the table. But profiling code should
  // tolerate free slots in the table, as class unloading can clear them. After such
  // cleanup, the receiver we need might be _after_ a free slot. Therefore, we must let
  // at least one full scan complete before trying to install new slots. Splitting the
  // code into several tight loops also helpfully optimizes for cases A and B.
2186 //
2187 // This code is effectively:
2188 //
2189 // restart:
2190 // // Fastest: receiver is already installed
2191 // for (i = 0; i < receiver_count(); i++) {
2192 // if (receiver(i) == recv) goto found_recv(i);
2193 // }
2194 //
2195 // // Fast: no receiver, but profile is full
2196 // for (i = 0; i < receiver_count(); i++) {
2197 // if (receiver(i) == null) goto found_null(i);
2198 // }
2199 // goto polymorphic
2200 //
2201 // // Slow: try to install receiver
2202 // found_null(i):
2203 // CAS(&receiver(i), null, recv);
2204 // goto restart
2205 //
2206 // polymorphic:
2207 // count++;
2208 // return
2209 //
2210 // found_recv(i):
2211 // *receiver_count(i)++
2212 //
2213
2214 bind(L_restart);
2215
2216 // Fastest: receiver is already installed
2217 mov(offset, base_receiver_offset);
2218 bind(L_loop_search_receiver);
2219 ldr(rscratch1, Address(mdp, offset));
2220 cmp(rscratch1, recv);
2221 br(Assembler::EQ, L_found_recv);
2222 add(offset, offset, receiver_step);
2223 sub(rscratch1, offset, end_receiver_offset);
2224 cbnz(rscratch1, L_loop_search_receiver);
2225
2226 // Fast: no receiver, but profile is full
2227 mov(offset, base_receiver_offset);
2228 bind(L_loop_search_empty);
2229 ldr(rscratch1, Address(mdp, offset));
2230 cbz(rscratch1, L_found_empty);
2231 add(offset, offset, receiver_step);
2232 sub(rscratch1, offset, end_receiver_offset);
2233 cbnz(rscratch1, L_loop_search_empty);
2234 b(L_polymorphic);
2235
2236 // Slow: try to install receiver
2237 bind(L_found_empty);
2238
2239 // Atomically swing receiver slot: null -> recv.
2240 //
2241 // The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
2242 // is used to hold the destination address. This is safe because the
2243 // offset is no longer needed after the address is computed.
2244
2245 lea(rscratch2, Address(mdp, offset));
2246 cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
2247 /*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
2248
2249 // CAS success means the slot now has the receiver we want. CAS failure means
2250 // something had claimed the slot concurrently: it can be the same receiver we want,
2251 // or something else. Since this is a slow path, we can optimize for code density,
2252 // and just restart the search from the beginning.
2253 b(L_restart);
2254
2255 // Counter updates:
2256
2257 // Increment polymorphic counter instead of receiver slot.
2258 bind(L_polymorphic);
2259 mov(offset, poly_count_offset);
2260 b(L_count_update);
2261
2262 // Found a receiver, convert its slot offset to corresponding count offset.
2263 bind(L_found_recv);
2264 add(offset, offset, receiver_to_count_step);
2265
2266 bind(L_count_update);
2267 increment(Address(mdp, offset), DataLayout::counter_increment);
2268 }
2269
2270
2271 void MacroAssembler::call_VM_leaf_base(address entry_point,
2272 int number_of_arguments,
2273 Label *retaddr) {
2274 Label E, L;
2275
2276 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
2277
2278 mov(rscratch1, RuntimeAddress(entry_point));
2279 blr(rscratch1);
2280 if (retaddr)
2281 bind(*retaddr);
2282
2283 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
2284 }
2285
2286 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2287 call_VM_leaf_base(entry_point, number_of_arguments);
2288 }
2289
2290 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2291 pass_arg0(this, arg_0);
2292 call_VM_leaf_base(entry_point, 1);
2293 }
2294
2295 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2296 assert_different_registers(arg_1, c_rarg0);
2297 pass_arg0(this, arg_0);
2298 pass_arg1(this, arg_1);
2299 call_VM_leaf_base(entry_point, 2);
2300 }
2301
2302 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
2303 Register arg_1, Register arg_2) {
2304 assert_different_registers(arg_1, c_rarg0);
2305 assert_different_registers(arg_2, c_rarg0, c_rarg1);
2306 pass_arg0(this, arg_0);
2307 pass_arg1(this, arg_1);
2308 pass_arg2(this, arg_2);
2309 call_VM_leaf_base(entry_point, 3);
2310 }
2311
2312 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2313 pass_arg0(this, arg_0);
2314 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2315 }
2316
2317 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2318
2319 assert_different_registers(arg_0, c_rarg1);
2320 pass_arg1(this, arg_1);
2321 pass_arg0(this, arg_0);
2322 MacroAssembler::call_VM_leaf_base(entry_point, 2);
2323 }
2324
2325 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2326 assert_different_registers(arg_0, c_rarg1, c_rarg2);
2327 assert_different_registers(arg_1, c_rarg2);
2328 pass_arg2(this, arg_2);
2329 pass_arg1(this, arg_1);
2330 pass_arg0(this, arg_0);
2331 MacroAssembler::call_VM_leaf_base(entry_point, 3);
2332 }
2333
2334 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2335 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
2336 assert_different_registers(arg_1, c_rarg2, c_rarg3);
2337 assert_different_registers(arg_2, c_rarg3);
2338 pass_arg3(this, arg_3);
2339 pass_arg2(this, arg_2);
2340 pass_arg1(this, arg_1);
2341 pass_arg0(this, arg_0);
2342 MacroAssembler::call_VM_leaf_base(entry_point, 4);
2343 }
2344
2345 void MacroAssembler::null_check(Register reg, int offset) {
2346 if (needs_explicit_null_check(offset)) {
2347 // provoke OS null exception if reg is null by
2348 // accessing M[reg] w/o changing any registers
2349 // NOTE: this is plenty to provoke a segv
2350 ldr(zr, Address(reg));
2351 } else {
2352 // nothing to do, (later) access of M[reg + offset]
2353 // will provoke OS null exception if reg is null
2354 }
2355 }
2356
2357 // MacroAssembler protected routines needed to implement
2358 // public methods
2359
2360 void MacroAssembler::mov(Register r, Address dest) {
2361 code_section()->relocate(pc(), dest.rspec());
2362 uint64_t imm64 = (uint64_t)dest.target();
2363 movptr(r, imm64);
2364 }
2365
2366 // Move a constant pointer into r. In AArch64 mode the virtual
2367 // address space is 48 bits in size, so we only need three
2368 // instructions to create a patchable instruction sequence that can
2369 // reach anywhere.
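// For example, movptr(r, 0x123456789ABC) emits:
//   movz(r, 0x9ABC);       // bits [15:0]
//   movk(r, 0x5678, 16);   // bits [31:16]
//   movk(r, 0x1234, 32);   // bits [47:32]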
2370 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
2371 #ifndef PRODUCT
2372 {
2373 char buffer[64];
2374 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
2375 block_comment(buffer);
2376 }
2377 #endif
2378 assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
2379 movz(r, imm64 & 0xffff);
2380 imm64 >>= 16;
2381 movk(r, imm64 & 0xffff, 16);
2382 imm64 >>= 16;
2383 movk(r, imm64 & 0xffff, 32);
2384 }
2385
2386 // Macro to mov replicated immediate to vector register.
2387 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
2388 // the upper 56/48/32 bits must be zeros for B/H/S type.
2389 // Vd will get the following values for different arrangements in T
2390 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh
2391 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh
2392 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh
2393 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh
2394 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
2395 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
2396 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh
2397 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh
2398 // Clobbers rscratch1
2399 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
2400 assert(T != T1Q, "unsupported");
2401 if (T == T1D || T == T2D) {
2402 int imm = operand_valid_for_movi_immediate(imm64, T);
2403 if (-1 != imm) {
2404 movi(Vd, T, imm);
2405 } else {
2406 mov(rscratch1, imm64);
2407 dup(Vd, T, rscratch1);
2408 }
2409 return;
2410 }
2411
2412 #ifdef ASSERT
2413 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
2414 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
2415 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
2416 #endif
2417 int shift = operand_valid_for_movi_immediate(imm64, T);
2418 uint32_t imm32 = imm64 & 0xffffffffULL;
2419 if (shift >= 0) {
2420 movi(Vd, T, (imm32 >> shift) & 0xff, shift);
2421 } else {
2422 movw(rscratch1, imm32);
2423 dup(Vd, T, rscratch1);
2424 }
2425 }
2426
2427 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
2428 {
2429 #ifndef PRODUCT
2430 {
2431 char buffer[64];
2432 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
2433 block_comment(buffer);
2434 }
2435 #endif
2436 if (operand_valid_for_logical_immediate(false, imm64)) {
2437 orr(dst, zr, imm64);
2438 } else {
2439 // we can use a combination of MOVZ or MOVN with
2440 // MOVK to build up the constant
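    // For example, 0x0000dead00000000 has three zero halfwords, so a
    // single movz(dst, 0xdead, 32) suffices; 0xffffffff1234ffff has
    // three 0xffff halfwords, so movn(dst, 0x1234 ^ 0xffff, 16) does.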
2441 uint64_t imm_h[4];
2442 int zero_count = 0;
2443 int neg_count = 0;
2444 int i;
2445 for (i = 0; i < 4; i++) {
2446 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
2447 if (imm_h[i] == 0) {
2448 zero_count++;
2449 } else if (imm_h[i] == 0xffffL) {
2450 neg_count++;
2451 }
2452 }
2453 if (zero_count == 4) {
2454 // one MOVZ will do
2455 movz(dst, 0);
2456 } else if (neg_count == 4) {
2457 // one MOVN will do
2458 movn(dst, 0);
2459 } else if (zero_count == 3) {
2460 for (i = 0; i < 4; i++) {
2461 if (imm_h[i] != 0L) {
2462 movz(dst, (uint32_t)imm_h[i], (i << 4));
2463 break;
2464 }
2465 }
2466 } else if (neg_count == 3) {
2467 // one MOVN will do
2468 for (int i = 0; i < 4; i++) {
2469 if (imm_h[i] != 0xffffL) {
2470 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2471 break;
2472 }
2473 }
2474 } else if (zero_count == 2) {
2475 // one MOVZ and one MOVK will do
2476 for (i = 0; i < 3; i++) {
2477 if (imm_h[i] != 0L) {
2478 movz(dst, (uint32_t)imm_h[i], (i << 4));
2479 i++;
2480 break;
2481 }
2482 }
2483 for (;i < 4; i++) {
2484 if (imm_h[i] != 0L) {
2485 movk(dst, (uint32_t)imm_h[i], (i << 4));
2486 }
2487 }
2488 } else if (neg_count == 2) {
2489 // one MOVN and one MOVK will do
2490 for (i = 0; i < 4; i++) {
2491 if (imm_h[i] != 0xffffL) {
2492 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2493 i++;
2494 break;
2495 }
2496 }
2497 for (;i < 4; i++) {
2498 if (imm_h[i] != 0xffffL) {
2499 movk(dst, (uint32_t)imm_h[i], (i << 4));
2500 }
2501 }
2502 } else if (zero_count == 1) {
2503 // one MOVZ and two MOVKs will do
2504 for (i = 0; i < 4; i++) {
2505 if (imm_h[i] != 0L) {
2506 movz(dst, (uint32_t)imm_h[i], (i << 4));
2507 i++;
2508 break;
2509 }
2510 }
2511 for (;i < 4; i++) {
2512 if (imm_h[i] != 0x0L) {
2513 movk(dst, (uint32_t)imm_h[i], (i << 4));
2514 }
2515 }
2516 } else if (neg_count == 1) {
2517 // one MOVN and two MOVKs will do
2518 for (i = 0; i < 4; i++) {
2519 if (imm_h[i] != 0xffffL) {
2520 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2521 i++;
2522 break;
2523 }
2524 }
2525 for (;i < 4; i++) {
2526 if (imm_h[i] != 0xffffL) {
2527 movk(dst, (uint32_t)imm_h[i], (i << 4));
2528 }
2529 }
2530 } else {
2531 // use a MOVZ and 3 MOVKs (makes it easier to debug)
2532 movz(dst, (uint32_t)imm_h[0], 0);
2533 for (i = 1; i < 4; i++) {
2534 movk(dst, (uint32_t)imm_h[i], (i << 4));
2535 }
2536 }
2537 }
2538 }
2539
2540 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
2541 {
2542 #ifndef PRODUCT
2543 {
2544 char buffer[64];
2545 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
2546 block_comment(buffer);
2547 }
2548 #endif
2549 if (operand_valid_for_logical_immediate(true, imm32)) {
2550 orrw(dst, zr, imm32);
2551 } else {
    // we can use a single MOVZ or MOVN, or a MOVZ/MOVN plus MOVK pair,
    // to build up the constant
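    // For example, 0x00120000 has a zero low halfword, so a single
    // movzw(dst, 0x12, 16) suffices.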
2554 uint32_t imm_h[2];
2555 imm_h[0] = imm32 & 0xffff;
2556 imm_h[1] = ((imm32 >> 16) & 0xffff);
2557 if (imm_h[0] == 0) {
2558 movzw(dst, imm_h[1], 16);
2559 } else if (imm_h[0] == 0xffff) {
2560 movnw(dst, imm_h[1] ^ 0xffff, 16);
2561 } else if (imm_h[1] == 0) {
2562 movzw(dst, imm_h[0], 0);
2563 } else if (imm_h[1] == 0xffff) {
2564 movnw(dst, imm_h[0] ^ 0xffff, 0);
2565 } else {
2566 // use a MOVZ and MOVK (makes it easier to debug)
2567 movzw(dst, imm_h[0], 0);
2568 movkw(dst, imm_h[1], 16);
2569 }
2570 }
2571 }
2572
2573 // Form an address from base + offset in Rd. Rd may or may
2574 // not actually be used: you must use the Address that is returned.
2575 // It is up to you to ensure that the shift provided matches the size
2576 // of your data.
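// For illustration: with shift == 3 (8-byte elements), a byte_offset of
// 0x9008 is too large to encode directly, so the code below emits
// add(Rd, base, 0x8000) and returns Address(Rd, 0x1008); both pieces
// encode as valid immediates and sum to the original offset.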
2577 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
2578 if (Address::offset_ok_for_immed(byte_offset, shift))
2579 // It fits; no need for any heroics
2580 return Address(base, byte_offset);
2581
2582 // Don't do anything clever with negative or misaligned offsets
2583 unsigned mask = (1 << shift) - 1;
2584 if (byte_offset < 0 || byte_offset & mask) {
2585 mov(Rd, byte_offset);
2586 add(Rd, base, Rd);
2587 return Address(Rd);
2588 }
2589
2590 // See if we can do this with two 12-bit offsets
2591 {
2592 uint64_t word_offset = byte_offset >> shift;
2593 uint64_t masked_offset = word_offset & 0xfff000;
2594 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
2595 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
2596 add(Rd, base, masked_offset << shift);
2597 word_offset -= masked_offset;
2598 return Address(Rd, word_offset << shift);
2599 }
2600 }
2601
2602 // Do it the hard way
2603 mov(Rd, byte_offset);
2604 add(Rd, base, Rd);
2605 return Address(Rd);
2606 }
2607
2608 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
2609 bool want_remainder, Register scratch)
2610 {
2611 // Full implementation of Java idiv and irem. The function
2612 // returns the (pc) offset of the div instruction - may be needed
2613 // for implicit exceptions.
2614 //
2615 // constraint : ra/rb =/= scratch
2616 // normal case
2617 //
2618 // input : ra: dividend
2619 // rb: divisor
2620 //
2621 // result: either
2622 // quotient (= ra idiv rb)
2623 // remainder (= ra irem rb)
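  //
  // For the remainder case, the msubw below computes
  //   result = ra - (ra sdiv rb) * rb
  // which matches Java irem semantics (remainder has the sign of ra).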
2624
2625 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2626
2627 int idivl_offset = offset();
2628 if (! want_remainder) {
2629 sdivw(result, ra, rb);
2630 } else {
2631 sdivw(scratch, ra, rb);
2632 Assembler::msubw(result, scratch, rb, ra);
2633 }
2634
2635 return idivl_offset;
2636 }
2637
2638 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2639 bool want_remainder, Register scratch)
2640 {
2641 // Full implementation of Java ldiv and lrem. The function
2642 // returns the (pc) offset of the div instruction - may be needed
2643 // for implicit exceptions.
2644 //
2645 // constraint : ra/rb =/= scratch
2646 // normal case
2647 //
2648 // input : ra: dividend
2649 // rb: divisor
2650 //
2651 // result: either
2652 // quotient (= ra idiv rb)
2653 // remainder (= ra irem rb)
2654
2655 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2656
2657 int idivq_offset = offset();
2658 if (! want_remainder) {
2659 sdiv(result, ra, rb);
2660 } else {
2661 sdiv(scratch, ra, rb);
2662 Assembler::msub(result, scratch, rb, ra);
2663 }
2664
2665 return idivq_offset;
2666 }
2667
2668 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2669 address prev = pc() - NativeMembar::instruction_size;
2670 address last = code()->last_insn();
2671 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2672 NativeMembar *bar = NativeMembar_at(prev);
2673 if (AlwaysMergeDMB) {
2674 bar->set_kind(bar->get_kind() | order_constraint);
2675 BLOCK_COMMENT("merged membar(always)");
2676 return;
2677 }
2678 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2679 // doing so would introduce a StoreLoad which the caller did not
2680 // intend
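    // For example, merging "DMB ISHST" (StoreStore) with "DMB ISHLD"
    // (LoadLoad|LoadStore) into a full "DMB ISH" would also order
    // stores against subsequent loads, which neither barrier requested.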
2681 if (bar->get_kind() == order_constraint
2682 || bar->get_kind() == AnyAny
2683 || order_constraint == AnyAny) {
2684 // We are merging two memory barrier instructions. On AArch64 we
2685 // can do this simply by ORing them together.
2686 bar->set_kind(bar->get_kind() | order_constraint);
2687 BLOCK_COMMENT("merged membar");
2688 return;
2689 } else {
      // In a special case like "DMB ST; DMB LD; DMB ST", the last DMB can be skipped.
      // We need to check the last two instructions.
2692 address prev2 = prev - NativeMembar::instruction_size;
2693 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2694 NativeMembar *bar2 = NativeMembar_at(prev2);
2695 assert(bar2->get_kind() == order_constraint, "it should be merged before");
2696 BLOCK_COMMENT("merged membar(elided)");
2697 return;
2698 }
2699 }
2700 }
2701 code()->set_last_insn(pc());
2702 dmb(Assembler::barrier(order_constraint));
2703 }
2704
2705 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2706 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2707 merge_ldst(rt, adr, size_in_bytes, is_store);
2708 code()->clear_last_insn();
2709 return true;
2710 } else {
2711 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
2712 const uint64_t mask = size_in_bytes - 1;
2713 if (adr.getMode() == Address::base_plus_offset &&
2714 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2715 code()->set_last_insn(pc());
2716 }
2717 return false;
2718 }
2719 }
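// For example, str(r1, Address(sp, 16)) followed by
// str(r2, Address(sp, 24)) may be merged by the code above into a
// single stp(r1, r2, Address(sp, 16)), assuming ldst_can_merge accepts
// the pair (same size, adjacent base_plus_offset addresses).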
2720
2721 void MacroAssembler::ldr(Register Rx, const Address &adr) {
2722 // We always try to merge two adjacent loads into one ldp.
2723 if (!try_merge_ldst(Rx, adr, 8, false)) {
2724 Assembler::ldr(Rx, adr);
2725 }
2726 }
2727
2728 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2729 // We always try to merge two adjacent loads into one ldp.
2730 if (!try_merge_ldst(Rw, adr, 4, false)) {
2731 Assembler::ldrw(Rw, adr);
2732 }
2733 }
2734
2735 void MacroAssembler::str(Register Rx, const Address &adr) {
2736 // We always try to merge two adjacent stores into one stp.
2737 if (!try_merge_ldst(Rx, adr, 8, true)) {
2738 Assembler::str(Rx, adr);
2739 }
2740 }
2741
2742 void MacroAssembler::strw(Register Rw, const Address &adr) {
2743 // We always try to merge two adjacent stores into one stp.
2744 if (!try_merge_ldst(Rw, adr, 4, true)) {
2745 Assembler::strw(Rw, adr);
2746 }
2747 }
2748
// MacroAssembler routines actually found to be needed
2750
2751 void MacroAssembler::push(Register src)
2752 {
2753 str(src, Address(pre(esp, -1 * wordSize)));
2754 }
2755
2756 void MacroAssembler::pop(Register dst)
2757 {
2758 ldr(dst, Address(post(esp, 1 * wordSize)));
2759 }
2760
2761 // Note: load_unsigned_short used to be called load_unsigned_word.
2762 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2763 int off = offset();
2764 ldrh(dst, src);
2765 return off;
2766 }
2767
2768 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2769 int off = offset();
2770 ldrb(dst, src);
2771 return off;
2772 }
2773
2774 int MacroAssembler::load_signed_short(Register dst, Address src) {
2775 int off = offset();
2776 ldrsh(dst, src);
2777 return off;
2778 }
2779
2780 int MacroAssembler::load_signed_byte(Register dst, Address src) {
2781 int off = offset();
2782 ldrsb(dst, src);
2783 return off;
2784 }
2785
2786 int MacroAssembler::load_signed_short32(Register dst, Address src) {
2787 int off = offset();
2788 ldrshw(dst, src);
2789 return off;
2790 }
2791
2792 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
2793 int off = offset();
2794 ldrsbw(dst, src);
2795 return off;
2796 }
2797
2798 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2799 switch (size_in_bytes) {
2800 case 8: ldr(dst, src); break;
2801 case 4: ldrw(dst, src); break;
2802 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2803 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2804 default: ShouldNotReachHere();
2805 }
2806 }
2807
2808 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) {
2809 switch (size_in_bytes) {
2810 case 8: str(src, dst); break;
2811 case 4: strw(src, dst); break;
2812 case 2: strh(src, dst); break;
2813 case 1: strb(src, dst); break;
2814 default: ShouldNotReachHere();
2815 }
2816 }
2817
2818 void MacroAssembler::narrow_subword_type(Register reg, BasicType bt) {
2819 assert(is_subword_type(bt), "required");
2820 switch (bt) {
2821 case T_BOOLEAN: andw(reg, reg, 1); break;
2822 case T_BYTE: sxtbw(reg, reg); break;
2823 case T_CHAR: uxthw(reg, reg); break;
2824 case T_SHORT: sxthw(reg, reg); break;
2825 default: ShouldNotReachHere();
2826 }
2827 }
2828
2829 void MacroAssembler::decrementw(Register reg, int value)
2830 {
2831 if (value < 0) { incrementw(reg, -value); return; }
2832 if (value == 0) { return; }
2833 if (value < (1 << 12)) { subw(reg, reg, value); return; }
2834 /* else */ {
2835 guarantee(reg != rscratch2, "invalid dst for register decrement");
2836 movw(rscratch2, (unsigned)value);
2837 subw(reg, reg, rscratch2);
2838 }
2839 }
2840
2841 void MacroAssembler::decrement(Register reg, int value)
2842 {
2843 if (value < 0) { increment(reg, -value); return; }
2844 if (value == 0) { return; }
2845 if (value < (1 << 12)) { sub(reg, reg, value); return; }
2846 /* else */ {
2847 assert(reg != rscratch2, "invalid dst for register decrement");
2848 mov(rscratch2, (uint64_t)value);
2849 sub(reg, reg, rscratch2);
2850 }
2851 }
2852
2853 void MacroAssembler::decrementw(Address dst, int value)
2854 {
2855 assert(!dst.uses(rscratch1), "invalid dst for address decrement");
2856 if (dst.getMode() == Address::literal) {
2857 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2858 lea(rscratch2, dst);
2859 dst = Address(rscratch2);
2860 }
2861 ldrw(rscratch1, dst);
2862 decrementw(rscratch1, value);
2863 strw(rscratch1, dst);
2864 }
2865
2866 void MacroAssembler::decrement(Address dst, int value)
2867 {
2868 assert(!dst.uses(rscratch1), "invalid address for decrement");
2869 if (dst.getMode() == Address::literal) {
2870 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2871 lea(rscratch2, dst);
2872 dst = Address(rscratch2);
2873 }
2874 ldr(rscratch1, dst);
2875 decrement(rscratch1, value);
2876 str(rscratch1, dst);
2877 }
2878
2879 void MacroAssembler::incrementw(Register reg, int value)
2880 {
2881 if (value < 0) { decrementw(reg, -value); return; }
2882 if (value == 0) { return; }
2883 if (value < (1 << 12)) { addw(reg, reg, value); return; }
2884 /* else */ {
2885 assert(reg != rscratch2, "invalid dst for register increment");
2886 movw(rscratch2, (unsigned)value);
2887 addw(reg, reg, rscratch2);
2888 }
2889 }
2890
2891 void MacroAssembler::increment(Register reg, int value)
2892 {
2893 if (value < 0) { decrement(reg, -value); return; }
2894 if (value == 0) { return; }
2895 if (value < (1 << 12)) { add(reg, reg, value); return; }
2896 /* else */ {
2897 assert(reg != rscratch2, "invalid dst for register increment");
2898 movw(rscratch2, (unsigned)value);
2899 add(reg, reg, rscratch2);
2900 }
2901 }
2902
2903 void MacroAssembler::incrementw(Address dst, int value)
2904 {
2905 assert(!dst.uses(rscratch1), "invalid dst for address increment");
2906 if (dst.getMode() == Address::literal) {
2907 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2908 lea(rscratch2, dst);
2909 dst = Address(rscratch2);
2910 }
2911 ldrw(rscratch1, dst);
2912 incrementw(rscratch1, value);
2913 strw(rscratch1, dst);
2914 }
2915
2916 void MacroAssembler::increment(Address dst, int value)
2917 {
2918 assert(!dst.uses(rscratch1), "invalid dst for address increment");
2919 if (dst.getMode() == Address::literal) {
2920 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2921 lea(rscratch2, dst);
2922 dst = Address(rscratch2);
2923 }
2924 ldr(rscratch1, dst);
2925 increment(rscratch1, value);
2926 str(rscratch1, dst);
2927 }
2928
2929 // Push lots of registers in the bit set supplied. Don't push sp.
2930 // Return the number of words pushed
2931 int MacroAssembler::push(RegSet regset, Register stack) {
2932 if (regset.bits() == 0) {
2933 return 0;
2934 }
2935 auto bitset = integer_cast<unsigned int>(regset.bits());
2936 int words_pushed = 0;
2937
2938 // Scan bitset to accumulate register pairs
2939 unsigned char regs[32];
2940 int count = 0;
2941 for (int reg = 0; reg <= 30; reg++) {
2942 if (1 & bitset)
2943 regs[count++] = reg;
2944 bitset >>= 1;
2945 }
2946 regs[count++] = zr->raw_encoding();
2947 count &= ~1; // Only push an even number of regs
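  // For example, pushing {r1, r3, r7}: zr pads the set to an even
  // count, so we emit stp(r1, r3, [stack, -32]!) followed by
  // stp(r7, zr, [stack, 16]).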
2948
2949 if (count) {
2950 stp(as_Register(regs[0]), as_Register(regs[1]),
2951 Address(pre(stack, -count * wordSize)));
2952 words_pushed += 2;
2953 }
2954 for (int i = 2; i < count; i += 2) {
2955 stp(as_Register(regs[i]), as_Register(regs[i+1]),
2956 Address(stack, i * wordSize));
2957 words_pushed += 2;
2958 }
2959
2960 assert(words_pushed == count, "oops, pushed != count");
2961
2962 return count;
2963 }
2964
2965 int MacroAssembler::pop(RegSet regset, Register stack) {
2966 if (regset.bits() == 0) {
2967 return 0;
2968 }
2969 auto bitset = integer_cast<unsigned int>(regset.bits());
2970 int words_pushed = 0;
2971
2972 // Scan bitset to accumulate register pairs
2973 unsigned char regs[32];
2974 int count = 0;
2975 for (int reg = 0; reg <= 30; reg++) {
2976 if (1 & bitset)
2977 regs[count++] = reg;
2978 bitset >>= 1;
2979 }
2980 regs[count++] = zr->raw_encoding();
2981 count &= ~1;
2982
2983 for (int i = 2; i < count; i += 2) {
2984 ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2985 Address(stack, i * wordSize));
2986 words_pushed += 2;
2987 }
2988 if (count) {
2989 ldp(as_Register(regs[0]), as_Register(regs[1]),
2990 Address(post(stack, count * wordSize)));
2991 words_pushed += 2;
2992 }
2993
2994 assert(words_pushed == count, "oops, pushed != count");
2995
2996 return count;
2997 }
2998
2999 // Push lots of registers in the bit set supplied. Don't push sp.
3000 // Return the number of dwords pushed
3001 int MacroAssembler::push_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) {
3002 if (regset.bits() == 0) {
3003 return 0;
3004 }
3005 auto bitset = integer_cast<unsigned int>(regset.bits());
3006 int words_pushed = 0;
3007 bool use_sve = false;
3008 int sve_vector_size_in_bytes = 0;
3009
3010 #ifdef COMPILER2
3011 use_sve = Matcher::supports_scalable_vector();
3012 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
3013 #endif
3014
3015 // Scan bitset to accumulate register pairs
3016 unsigned char regs[32];
3017 int count = 0;
3018 for (int reg = 0; reg <= 31; reg++) {
3019 if (1 & bitset)
3020 regs[count++] = reg;
3021 bitset >>= 1;
3022 }
3023
3024 if (count == 0) {
3025 return 0;
3026 }
3027
3028 if (mode == PushPopFull) {
3029 if (use_sve && sve_vector_size_in_bytes > 16) {
3030 mode = PushPopSVE;
3031 } else {
3032 mode = PushPopNeon;
3033 }
3034 }
3035
3036 #ifndef PRODUCT
3037 {
3038 char buffer[48];
3039 if (mode == PushPopSVE) {
3040 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
3041 } else if (mode == PushPopNeon) {
3042 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
3043 } else {
3044 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
3045 }
3046 block_comment(buffer);
3047 }
3048 #endif
3049
3050 if (mode == PushPopSVE) {
3051 sub(stack, stack, sve_vector_size_in_bytes * count);
3052 for (int i = 0; i < count; i++) {
3053 sve_str(as_FloatRegister(regs[i]), Address(stack, i));
3054 }
3055 return count * sve_vector_size_in_bytes / 8;
3056 }
3057
3058 if (mode == PushPopNeon) {
3059 if (count == 1) {
3060 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
3061 return 2;
3062 }
3063
3064 bool odd = (count & 1) == 1;
3065 int push_slots = count + (odd ? 1 : 0);
3066
3067 // Always pushing full 128 bit registers.
3068 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
3069 words_pushed += 2;
3070
3071 for (int i = 2; i + 1 < count; i += 2) {
3072 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3073 words_pushed += 2;
3074 }
3075
3076 if (odd) {
3077 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3078 words_pushed++;
3079 }
3080
3081 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3082 return count * 2;
3083 }
3084
3085 if (mode == PushPopFp) {
3086 bool odd = (count & 1) == 1;
3087 int push_slots = count + (odd ? 1 : 0);
3088
3089 if (count == 1) {
      // Stack pointer must be 16-byte aligned
3091 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize)));
3092 return 1;
3093 }
3094
3095 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize)));
3096 words_pushed += 2;
3097
3098 for (int i = 2; i + 1 < count; i += 2) {
3099 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3100 words_pushed += 2;
3101 }
3102
3103 if (odd) {
      // Stack pointer must be 16-byte aligned
3105 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3106 words_pushed++;
3107 }
3108
3109 assert(words_pushed == count, "oops, pushed != count");
3110
3111 return count;
3112 }
3113
3114 return 0;
3115 }
3116
3117 // Return the number of dwords popped
3118 int MacroAssembler::pop_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) {
3119 if (regset.bits() == 0) {
3120 return 0;
3121 }
3122 auto bitset = integer_cast<unsigned int>(regset.bits());
3123 int words_pushed = 0;
3124 bool use_sve = false;
3125 int sve_vector_size_in_bytes = 0;
3126
3127 #ifdef COMPILER2
3128 use_sve = Matcher::supports_scalable_vector();
3129 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
3130 #endif
3131 // Scan bitset to accumulate register pairs
3132 unsigned char regs[32];
3133 int count = 0;
3134 for (int reg = 0; reg <= 31; reg++) {
3135 if (1 & bitset)
3136 regs[count++] = reg;
3137 bitset >>= 1;
3138 }
3139
3140 if (count == 0) {
3141 return 0;
3142 }
3143
3144 if (mode == PushPopFull) {
3145 if (use_sve && sve_vector_size_in_bytes > 16) {
3146 mode = PushPopSVE;
3147 } else {
3148 mode = PushPopNeon;
3149 }
3150 }
3151
3152 #ifndef PRODUCT
3153 {
3154 char buffer[48];
3155 if (mode == PushPopSVE) {
3156 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
3157 } else if (mode == PushPopNeon) {
3158 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
3159 } else {
3160 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
3161 }
3162 block_comment(buffer);
3163 }
3164 #endif
3165
3166 if (mode == PushPopSVE) {
3167 for (int i = count - 1; i >= 0; i--) {
3168 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
3169 }
3170 add(stack, stack, sve_vector_size_in_bytes * count);
3171 return count * sve_vector_size_in_bytes / 8;
3172 }
3173
3174 if (mode == PushPopNeon) {
3175 if (count == 1) {
3176 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
3177 return 2;
3178 }
3179
3180 bool odd = (count & 1) == 1;
3181 int push_slots = count + (odd ? 1 : 0);
3182
3183 if (odd) {
3184 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3185 words_pushed++;
3186 }
3187
3188 for (int i = 2; i + 1 < count; i += 2) {
3189 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3190 words_pushed += 2;
3191 }
3192
3193 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
3194 words_pushed += 2;
3195
3196 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3197
3198 return count * 2;
3199 }
3200
3201 if (mode == PushPopFp) {
3202 bool odd = (count & 1) == 1;
3203 int push_slots = count + (odd ? 1 : 0);
3204
3205 if (count == 1) {
3206 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize)));
3207 return 1;
3208 }
3209
3210 if (odd) {
3211 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3212 words_pushed++;
3213 }
3214
3215 for (int i = 2; i + 1 < count; i += 2) {
3216 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3217 words_pushed += 2;
3218 }
3219
3220 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize)));
3221 words_pushed += 2;
3222
3223 assert(words_pushed == count, "oops, pushed != count");
3224
3225 return count;
3226 }
3227
3228 return 0;
3229 }
3230
3231 // Return the number of dwords pushed
3232 int MacroAssembler::push_p(PRegSet regset, Register stack) {
3233 if (regset.bits() == 0) {
3234 return 0;
3235 }
3236 auto bitset = integer_cast<unsigned int>(regset.bits());
3237 bool use_sve = false;
3238 int sve_predicate_size_in_slots = 0;
3239
3240 #ifdef COMPILER2
3241 use_sve = Matcher::supports_scalable_vector();
3242 if (use_sve) {
3243 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3244 }
3245 #endif
3246
3247 if (!use_sve) {
3248 return 0;
3249 }
3250
3251 unsigned char regs[PRegister::number_of_registers];
3252 int count = 0;
3253 for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3254 if (1 & bitset)
3255 regs[count++] = reg;
3256 bitset >>= 1;
3257 }
3258
3259 if (count == 0) {
3260 return 0;
3261 }
3262
3263 int total_push_bytes = align_up(sve_predicate_size_in_slots *
3264 VMRegImpl::stack_slot_size * count, 16);
3265 sub(stack, stack, total_push_bytes);
3266 for (int i = 0; i < count; i++) {
3267 sve_str(as_PRegister(regs[i]), Address(stack, i));
3268 }
3269 return total_push_bytes / 8;
3270 }
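
// Worked example (illustrative, assuming a predicate register occupies
// 2 stack slots, as with 512-bit SVE vectors): pushing 3 predicate
// registers needs 2 * 4 * 3 = 24 bytes, aligned up to 32, so push_p
// adjusts the stack by 32 bytes and returns 32 / 8 = 4 dwords.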
3271
3272 // Return the number of dwords popped
3273 int MacroAssembler::pop_p(PRegSet regset, Register stack) {
3274 if (regset.bits() == 0) {
3275 return 0;
3276 }
3277 auto bitset = integer_cast<unsigned int>(regset.bits());
3278 bool use_sve = false;
3279 int sve_predicate_size_in_slots = 0;
3280
3281 #ifdef COMPILER2
3282 use_sve = Matcher::supports_scalable_vector();
3283 if (use_sve) {
3284 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3285 }
3286 #endif
3287
3288 if (!use_sve) {
3289 return 0;
3290 }
3291
3292 unsigned char regs[PRegister::number_of_registers];
3293 int count = 0;
3294 for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3295 if (1 & bitset)
3296 regs[count++] = reg;
3297 bitset >>= 1;
3298 }
3299
3300 if (count == 0) {
3301 return 0;
3302 }
3303
3304 int total_pop_bytes = align_up(sve_predicate_size_in_slots *
3305 VMRegImpl::stack_slot_size * count, 16);
3306 for (int i = count - 1; i >= 0; i--) {
3307 sve_ldr(as_PRegister(regs[i]), Address(stack, i));
3308 }
3309 add(stack, stack, total_pop_bytes);
3310 return total_pop_bytes / 8;
3311 }
3312
3313 #ifdef ASSERT
3314 void MacroAssembler::verify_heapbase(const char* msg) {
3315 #if 0
3316 assert (Universe::heap() != nullptr, "java heap should be initialized");
3317 if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
3318 // rheapbase is allocated as general register
3319 return;
3320 }
3321 if (CheckCompressedOops) {
3322 Label ok;
3323 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
3324 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3325 br(Assembler::EQ, ok);
3326 stop(msg);
3327 bind(ok);
3328 pop(1 << rscratch1->encoding(), sp);
3329 }
3330 #endif
3331 }
3332 #endif
3333
3334 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3335 assert_different_registers(value, tmp1, tmp2);
3336 Label done, tagged, weak_tagged;
3337
3338 cbz(value, done); // Use null as-is.
3339 tst(value, JNIHandles::tag_mask); // Test for tag.
3340 br(Assembler::NE, tagged);
3341
3342 // Resolve local handle
3343 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
3344 verify_oop(value);
3345 b(done);
3346
3347 bind(tagged);
3348 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
3349 tbnz(value, 0, weak_tagged); // Test for weak tag.
3350
3351 // Resolve global handle
3352 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3353 verify_oop(value);
3354 b(done);
3355
3356 bind(weak_tagged);
3357 // Resolve jweak.
3358 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3359 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2);
3360 verify_oop(value);
3361
3362 bind(done);
3363 }
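
// Tag dispatch used above (cf. JNIHandles::TypeTag):
//   value == 0     -> null, passed through as-is
//   low bits 0b00  -> local handle, loaded raw
//   low bits 0b10  -> global handle, tag subtracted before the load
//   bit 0 set      -> weak global handle, loaded with ON_PHANTOM_OOP_REF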
3364
3365 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) {
3366 assert_different_registers(value, tmp1, tmp2);
3367 Label done;
3368
3369 cbz(value, done); // Use null as-is.
3370
3371 #ifdef ASSERT
3372 {
3373 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3374 Label valid_global_tag;
3375 tbnz(value, 1, valid_global_tag); // Test for global tag
3376 stop("non global jobject using resolve_global_jobject");
3377 bind(valid_global_tag);
3378 }
3379 #endif
3380
3381 // Resolve global handle
3382 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3383 verify_oop(value);
3384
3385 bind(done);
3386 }
3387
3388 void MacroAssembler::stop(const char* msg) {
  // Don't AOT-cache C strings emitted into the scratch buffer.
3390 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
3391 BLOCK_COMMENT(str);
3392 // load msg into r0 so we can access it from the signal handler
3393 // ExternalAddress enables saving and restoring via the code cache
3394 lea(c_rarg0, ExternalAddress((address) str));
3395 dcps1(0xdeae);
3396 }
3397
3398 void MacroAssembler::unimplemented(const char* what) {
3399 const char* buf = nullptr;
3400 {
3401 ResourceMark rm;
3402 stringStream ss;
3403 ss.print("unimplemented: %s", what);
3404 buf = code_string(ss.as_string());
3405 }
3406 stop(buf);
3407 }
3408
3409 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3410 #ifdef ASSERT
3411 Label OK;
3412 br(cc, OK);
3413 stop(msg);
3414 bind(OK);
3415 #endif
3416 }
3417
3418 // If a constant does not fit in an immediate field, generate some
3419 // number of MOV instructions and then perform the operation.
3420 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
3421 add_sub_imm_insn insn1,
3422 add_sub_reg_insn insn2,
3423 bool is32) {
3424 assert(Rd != zr, "Rd = zr and not setting flags?");
3425 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3426 if (fits) {
3427 (this->*insn1)(Rd, Rn, imm);
3428 } else {
3429 if (g_uabs(imm) < (1 << 24)) {
3430 (this->*insn1)(Rd, Rn, imm & -(1 << 12));
3431 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
3432 } else {
3433 assert_different_registers(Rd, Rn);
3434 mov(Rd, imm);
3435 (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3436 }
3437 }
3438 }
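
// Example (illustrative only): add(r0, r1, 0x123456) does not fit a 12-bit
// (optionally shifted) immediate, but |imm| < 2^24, so it is split into:
//   add r0, r1, #0x123000   // imm & -(1 << 12), encoded as 0x123 LSL #12
//   add r0, r0, #0x456      // imm & ((1 << 12) - 1)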
3439
// Separate version which sets the flags. Optimisations are more restricted
// because we must set the flags correctly.
3442 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
3443 add_sub_imm_insn insn1,
3444 add_sub_reg_insn insn2,
3445 bool is32) {
3446 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3447 if (fits) {
3448 (this->*insn1)(Rd, Rn, imm);
3449 } else {
3450 assert_different_registers(Rd, Rn);
3451 assert(Rd != zr, "overflow in immediate operand");
3452 mov(Rd, imm);
3453 (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3454 }
3455 }
3456
3457
3458 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
3459 if (increment.is_register()) {
3460 add(Rd, Rn, increment.as_register());
3461 } else {
3462 add(Rd, Rn, increment.as_constant());
3463 }
3464 }
3465
3466 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
3467 if (increment.is_register()) {
3468 addw(Rd, Rn, increment.as_register());
3469 } else {
3470 addw(Rd, Rn, increment.as_constant());
3471 }
3472 }
3473
3474 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3475 if (decrement.is_register()) {
3476 sub(Rd, Rn, decrement.as_register());
3477 } else {
3478 sub(Rd, Rn, decrement.as_constant());
3479 }
3480 }
3481
3482 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3483 if (decrement.is_register()) {
3484 subw(Rd, Rn, decrement.as_register());
3485 } else {
3486 subw(Rd, Rn, decrement.as_constant());
3487 }
3488 }
3489
3490 void MacroAssembler::reinit_heapbase()
3491 {
3492 if (UseCompressedOops) {
3493 if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
3494 mov(rheapbase, CompressedOops::base());
3495 } else {
3496 lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3497 ldr(rheapbase, Address(rheapbase));
3498 }
3499 }
3500 }
3501
// A generic CAS; success or failure is in the EQ flag. A weak CAS
// doesn't retry and may fail spuriously. If the oldval is wanted,
// pass a register for the result; otherwise pass noreg.
3505
3506 // Clobbers rscratch1
3507 void MacroAssembler::cmpxchg(Register addr, Register expected,
3508 Register new_val,
3509 enum operand_size size,
3510 bool acquire, bool release,
3511 bool weak,
3512 Register result) {
3513 if (result == noreg) result = rscratch1;
3514 BLOCK_COMMENT("cmpxchg {");
3515 if (UseLSE) {
3516 mov(result, expected);
3517 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3518 compare_eq(result, expected, size);
3519 #ifdef ASSERT
    // Poison rscratch1, which is written on the !UseLSE branch
3521 mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3522 #endif
3523 } else {
3524 Label retry_load, done;
3525 prfm(Address(addr), PSTL1STRM);
3526 bind(retry_load);
3527 load_exclusive(result, addr, size, acquire);
3528 compare_eq(result, expected, size);
3529 br(Assembler::NE, done);
3530 store_exclusive(rscratch1, new_val, addr, size, release);
3531 if (weak) {
3532 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3533 } else {
3534 cbnzw(rscratch1, retry_load);
3535 }
3536 bind(done);
3537 }
3538 BLOCK_COMMENT("} cmpxchg");
3539 }
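
// Typical caller pattern (a sketch, not code emitted here): perform the CAS,
// then branch on the EQ flag it leaves behind:
//   cmpxchg(addr, expected, new_val, Assembler::xword,
//           /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg);
//   br(Assembler::NE, failed);   // EQ <=> the swap succeeded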
3540
// A generic comparison. Only compares for equality; clobbers rscratch1 for sub-word sizes.
3542 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
3543 if (size == xword) {
3544 cmp(rm, rn);
3545 } else if (size == word) {
3546 cmpw(rm, rn);
3547 } else if (size == halfword) {
3548 eorw(rscratch1, rm, rn);
3549 ands(zr, rscratch1, 0xffff);
3550 } else if (size == byte) {
3551 eorw(rscratch1, rm, rn);
3552 ands(zr, rscratch1, 0xff);
3553 } else {
3554 ShouldNotReachHere();
3555 }
3556 }
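
// There is no 16- or 8-bit form of cmp, so sub-word equality is tested by
// XOR-ing the operands and testing only the low bits, e.g. for halfword:
//   eorw rscratch1, rm, rn
//   ands zr, rscratch1, #0xffff   // Z is set iff the low 16 bits are equal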
3557
3558
3559 static bool different(Register a, RegisterOrConstant b, Register c) {
3560 if (b.is_constant())
3561 return a != c;
3562 else
3563 return a != b.as_register() && a != c && b.as_register() != c;
3564 }
3565
3566 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
3567 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
3568 if (UseLSE) { \
3569 prev = prev->is_valid() ? prev : zr; \
3570 if (incr.is_register()) { \
3571 AOP(sz, incr.as_register(), prev, addr); \
3572 } else { \
3573 mov(rscratch2, incr.as_constant()); \
3574 AOP(sz, rscratch2, prev, addr); \
3575 } \
3576 return; \
3577 } \
3578 Register result = rscratch2; \
3579 if (prev->is_valid()) \
3580 result = different(prev, incr, addr) ? prev : rscratch2; \
3581 \
3582 Label retry_load; \
3583 prfm(Address(addr), PSTL1STRM); \
3584 bind(retry_load); \
3585 LDXR(result, addr); \
3586 OP(rscratch1, result, incr); \
3587 STXR(rscratch2, rscratch1, addr); \
3588 cbnzw(rscratch2, retry_load); \
3589 if (prev->is_valid() && prev != result) { \
3590 IOP(prev, rscratch1, incr); \
3591 } \
3592 }
3593
3594 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
3595 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
3596 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
3597 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
3598
3599 #undef ATOMIC_OP
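
// Sketch of the LL/SC path the macro expands to for atomic_add (!UseLSE),
// assuming prev is valid and distinct from incr and addr:
//   prfm [addr], PSTL1STRM
// retry:
//   ldxr  prev, [addr]               // exclusive load of the old value
//   add   rscratch1, prev, incr     // compute the new value
//   stxr  rscratch2, rscratch1, [addr]
//   cbnzw rscratch2, retry          // exclusive store failed -> retry
// With UseLSE this collapses to a single ldadd (plus a constant move when
// incr is an immediate).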
3600
3601 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \
3602 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
3603 if (UseLSE) { \
3604 prev = prev->is_valid() ? prev : zr; \
3605 AOP(sz, newv, prev, addr); \
3606 return; \
3607 } \
3608 Register result = rscratch2; \
3609 if (prev->is_valid()) \
3610 result = different(prev, newv, addr) ? prev : rscratch2; \
3611 \
3612 Label retry_load; \
3613 prfm(Address(addr), PSTL1STRM); \
3614 bind(retry_load); \
3615 LDXR(result, addr); \
3616 STXR(rscratch1, newv, addr); \
3617 cbnzw(rscratch1, retry_load); \
3618 if (prev->is_valid() && prev != result) \
3619 mov(prev, result); \
3620 }
3621
3622 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3623 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3624 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3625 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3626 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3627 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3628
3629 #undef ATOMIC_XCHG
3630
3631 #ifndef PRODUCT
3632 extern "C" void findpc(intptr_t x);
3633 #endif
3634
3635 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3636 {
  // In order to get locks to work, we need to fake an in_VM state
3638 if (ShowMessageBoxOnError) {
3639 JavaThread* thread = JavaThread::current();
3640 thread->set_thread_state(_thread_in_vm);
3641 #ifndef PRODUCT
3642 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3643 ttyLocker ttyl;
3644 BytecodeCounter::print();
3645 }
3646 #endif
3647 if (os::message_box(msg, "Execution stopped, print registers?")) {
3648 ttyLocker ttyl;
3649 tty->print_cr(" pc = 0x%016" PRIx64, pc);
3650 #ifndef PRODUCT
3651 tty->cr();
3652 findpc(pc);
3653 tty->cr();
3654 #endif
3655 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3656 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3657 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3658 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3659 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3660 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3661 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3662 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3663 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3664 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3665 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3666 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3667 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3668 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3669 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3670 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3671 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3672 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3673 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3674 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3675 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3676 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3677 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3678 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3679 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3680 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3681 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3682 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3683 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3684 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3685 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3686 BREAKPOINT;
3687 }
3688 }
3689 fatal("DEBUG MESSAGE: %s", msg);
3690 }
3691
3692 RegSet MacroAssembler::call_clobbered_gp_registers() {
3693 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3694 #ifndef R18_RESERVED
3695 regs += r18_tls;
3696 #endif
3697 return regs;
3698 }
3699
3700 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3701 int step = 4 * wordSize;
3702 push(call_clobbered_gp_registers() - exclude, sp);
3703 sub(sp, sp, step);
3704 mov(rscratch1, -step);
  // Push v0-v7, v16-v31 (v8-v15 are callee-saved in AAPCS64 and skipped here).
3706 for (int i = 31; i>= 4; i -= 4) {
3707 if (i <= v7->encoding() || i >= v16->encoding())
3708 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
3709 as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
3710 }
3711 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
3712 as_FloatRegister(3), T1D, Address(sp));
3713 }
3714
3715 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
3716 for (int i = 0; i < 32; i += 4) {
3717 if (i <= v7->encoding() || i >= v16->encoding())
3718 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3719 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
3720 }
3721
3722 reinitialize_ptrue();
3723
3724 pop(call_clobbered_gp_registers() - exclude, sp);
3725 }
3726
3727 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
3728 int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3729 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
3730 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3731 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3732 for (int i = 0; i < FloatRegister::number_of_registers; i++) {
3733 sve_str(as_FloatRegister(i), Address(sp, i));
3734 }
3735 } else {
3736 int step = (save_vectors ? 8 : 4) * wordSize;
3737 mov(rscratch1, -step);
3738 sub(sp, sp, step);
3739 for (int i = 28; i >= 4; i -= 4) {
3740 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3741 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
3742 }
3743 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
3744 }
3745 if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
3746 sub(sp, sp, total_predicate_in_bytes);
3747 for (int i = 0; i < PRegister::number_of_registers; i++) {
3748 sve_str(as_PRegister(i), Address(sp, i));
3749 }
3750 }
3751 }
3752
3753 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
3754 int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3755 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
3756 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
3757 sve_ldr(as_PRegister(i), Address(sp, i));
3758 }
3759 add(sp, sp, total_predicate_in_bytes);
3760 }
3761 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3762 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
3763 sve_ldr(as_FloatRegister(i), Address(sp, i));
3764 }
3765 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3766 } else {
3767 int step = (restore_vectors ? 8 : 4) * wordSize;
3768 for (int i = 0; i <= 28; i += 4)
3769 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3770 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
3771 }
3772
  // With SVE we may use predicate registers and rely on ptrue,
  // regardless of whether wide vectors (> 8 bytes) are used.
3775 if (use_sve) {
3776 reinitialize_ptrue();
3777 }
3778
3779 // integer registers except lr & sp
3780 pop(RegSet::range(r0, r17), sp);
3781 #ifdef R18_RESERVED
3782 ldp(zr, r19, Address(post(sp, 2 * wordSize)));
3783 pop(RegSet::range(r20, r29), sp);
3784 #else
3785 pop(RegSet::range(r18_tls, r29), sp);
3786 #endif
3787 }
3788
3789 /**
3790 * Helpers for multiply_to_len().
3791 */
3792 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3793 Register src1, Register src2) {
3794 adds(dest_lo, dest_lo, src1);
3795 adc(dest_hi, dest_hi, zr);
3796 adds(dest_lo, dest_lo, src2);
3797 adc(final_dest_hi, dest_hi, zr);
3798 }
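
// i.e. (final_dest_hi:dest_lo) := (dest_hi:dest_lo) + src1 + src2. Each
// 64-bit add of src1/src2 into dest_lo may carry out; the adc that follows
// folds that carry into the high word before the next addend is applied.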
3799
3800 // Generate an address from (r + r1 extend offset). "size" is the
3801 // size of the operand. The result may be in rscratch2.
3802 Address MacroAssembler::offsetted_address(Register r, Register r1,
3803 Address::extend ext, int offset, int size) {
3804 if (offset || (ext.shift() % size != 0)) {
3805 lea(rscratch2, Address(r, r1, ext));
3806 return Address(rscratch2, offset);
3807 } else {
3808 return Address(r, r1, ext);
3809 }
3810 }
3811
3812 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3813 {
3814 assert(offset >= 0, "spill to negative address?");
  // Is the offset reachable?
  // Not aligned - 9-bit signed offset
  // Aligned - 12-bit unsigned offset, shifted
3818 Register base = sp;
3819 if ((offset & (size-1)) && offset >= (1<<8)) {
3820 add(tmp, base, offset & ((1<<12)-1));
3821 base = tmp;
3822 offset &= -1u<<12;
3823 }
3824
3825 if (offset >= (1<<12) * size) {
3826 add(tmp, base, offset & (((1<<12)-1)<<12));
3827 base = tmp;
3828 offset &= ~(((1<<12)-1)<<12);
3829 }
3830
3831 return Address(base, offset);
3832 }
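
// Worked example (illustrative only): spill_address(8, 0x13458, tmp).
// The offset is 8-byte aligned but beyond the scaled 12-bit unsigned
// range ((1 << 12) * 8 = 0x8000), so the high part is peeled off:
//   add tmp, sp, #0x13000
// and Address(tmp, 0x458) is returned.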
3833
3834 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3835 assert(offset >= 0, "spill to negative address?");
3836
3837 Register base = sp;
3838
3839 // An immediate offset in the range 0 to 255 which is multiplied
3840 // by the current vector or predicate register size in bytes.
3841 if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3842 return Address(base, offset / sve_reg_size_in_bytes);
3843 }
3844
3845 add(tmp, base, offset);
3846 return Address(tmp);
3847 }
3848
3849 // Checks whether offset is aligned.
3850 // Returns true if it is, else false.
3851 bool MacroAssembler::merge_alignment_check(Register base,
3852 size_t size,
3853 int64_t cur_offset,
3854 int64_t prev_offset) const {
3855 if (AvoidUnalignedAccesses) {
3856 if (base == sp) {
      // Checks whether the low offset is aligned to a pair of registers.
3858 int64_t pair_mask = size * 2 - 1;
3859 int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3860 return (offset & pair_mask) == 0;
3861 } else { // If base is not sp, we can't guarantee the access is aligned.
3862 return false;
3863 }
3864 } else {
3865 int64_t mask = size - 1;
    // Load/store pair instructions only support element-size-aligned offsets.
3867 return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3868 }
3869 }
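
// Example (illustrative only): with AvoidUnalignedAccesses and base == sp,
// two 8-byte accesses at offsets 16 and 24 pass the check (16 & 15 == 0),
// while offsets 8 and 16 fail (8 & 15 != 0): the merged 16-byte access
// would straddle a 16-byte boundary.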
3870
3871 // Checks whether current and previous loads/stores can be merged.
3872 // Returns true if it can be merged, else false.
3873 bool MacroAssembler::ldst_can_merge(Register rt,
3874 const Address &adr,
3875 size_t cur_size_in_bytes,
3876 bool is_store) const {
3877 address prev = pc() - NativeInstruction::instruction_size;
3878 address last = code()->last_insn();
3879
3880 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3881 return false;
3882 }
3883
3884 if (adr.getMode() != Address::base_plus_offset || prev != last) {
3885 return false;
3886 }
3887
3888 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3889 size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3890
3891 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3892 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3893
3894 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3895 return false;
3896 }
3897
3898 int64_t max_offset = 63 * prev_size_in_bytes;
3899 int64_t min_offset = -64 * prev_size_in_bytes;
3900
3901 assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
3902
3903 // Only same base can be merged.
3904 if (adr.base() != prev_ldst->base()) {
3905 return false;
3906 }
3907
3908 int64_t cur_offset = adr.offset();
3909 int64_t prev_offset = prev_ldst->offset();
3910 size_t diff = abs(cur_offset - prev_offset);
3911 if (diff != prev_size_in_bytes) {
3912 return false;
3913 }
3914
  // The following cases cannot be merged:
3916 // ldr x2, [x2, #8]
3917 // ldr x3, [x2, #16]
3918 // or:
3919 // ldr x2, [x3, #8]
3920 // ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get a SIGILL.
3922 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3923 return false;
3924 }
3925
3926 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3927 // Offset range must be in ldp/stp instruction's range.
3928 if (low_offset > max_offset || low_offset < min_offset) {
3929 return false;
3930 }
3931
3932 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3933 return true;
3934 }
3935
3936 return false;
3937 }
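
// Example (illustrative only): this adjacent, same-size, same-base pair
// satisfies the checks above and is rewritten by merge_ldst() below:
//   ldr x2, [sp, #16]
//   ldr x3, [sp, #24]    =>    ldp x2, x3, [sp, #16]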
3938
3939 // Merge current load/store with previous load/store into ldp/stp.
3940 void MacroAssembler::merge_ldst(Register rt,
3941 const Address &adr,
3942 size_t cur_size_in_bytes,
3943 bool is_store) {
3944
3945 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3946
3947 Register rt_low, rt_high;
3948 address prev = pc() - NativeInstruction::instruction_size;
3949 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3950
3951 int64_t offset;
3952
3953 if (adr.offset() < prev_ldst->offset()) {
3954 offset = adr.offset();
3955 rt_low = rt;
3956 rt_high = prev_ldst->target();
3957 } else {
3958 offset = prev_ldst->offset();
3959 rt_low = prev_ldst->target();
3960 rt_high = rt;
3961 }
3962
3963 Address adr_p = Address(prev_ldst->base(), offset);
3964 // Overwrite previous generated binary.
3965 code_section()->set_end(prev);
3966
3967 const size_t sz = prev_ldst->size_in_bytes();
3968 assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3969 if (!is_store) {
3970 BLOCK_COMMENT("merged ldr pair");
3971 if (sz == 8) {
3972 ldp(rt_low, rt_high, adr_p);
3973 } else {
3974 ldpw(rt_low, rt_high, adr_p);
3975 }
3976 } else {
3977 BLOCK_COMMENT("merged str pair");
3978 if (sz == 8) {
3979 stp(rt_low, rt_high, adr_p);
3980 } else {
3981 stpw(rt_low, rt_high, adr_p);
3982 }
3983 }
3984 }
3985
3986 /**
 * Multiply 64 bit by 64 bit, first loop.
3988 */
3989 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3990 Register y, Register y_idx, Register z,
3991 Register carry, Register product,
3992 Register idx, Register kdx) {
3993 //
3994 // jlong carry, x[], y[], z[];
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3996 // huge_128 product = y[idx] * x[xstart] + carry;
3997 // z[kdx] = (jlong)product;
3998 // carry = (jlong)(product >>> 64);
3999 // }
4000 // z[xstart] = carry;
4001 //
4002
4003 Label L_first_loop, L_first_loop_exit;
4004 Label L_one_x, L_one_y, L_multiply;
4005
4006 subsw(xstart, xstart, 1);
4007 br(Assembler::MI, L_one_x);
4008
4009 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
4010 ldr(x_xstart, Address(rscratch1));
4011 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
4012
4013 bind(L_first_loop);
4014 subsw(idx, idx, 1);
4015 br(Assembler::MI, L_first_loop_exit);
4016 subsw(idx, idx, 1);
4017 br(Assembler::MI, L_one_y);
4018 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4019 ldr(y_idx, Address(rscratch1));
4020 ror(y_idx, y_idx, 32); // convert big-endian to little-endian
4021 bind(L_multiply);
4022
4023 // AArch64 has a multiply-accumulate instruction that we can't use
4024 // here because it has no way to process carries, so we have to use
4025 // separate add and adc instructions. Bah.
4026 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
4027 mul(product, x_xstart, y_idx);
4028 adds(product, product, carry);
4029 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
4030
4031 subw(kdx, kdx, 2);
4032 ror(product, product, 32); // back to big-endian
4033 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
4034
4035 b(L_first_loop);
4036
4037 bind(L_one_y);
4038 ldrw(y_idx, Address(y, 0));
4039 b(L_multiply);
4040
4041 bind(L_one_x);
4042 ldrw(x_xstart, Address(x, 0));
4043 b(L_first_loop);
4044
4045 bind(L_first_loop_exit);
4046 }
4047
4048 /**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
4050 *
4051 */
4052 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
4053 Register carry, Register carry2,
4054 Register idx, Register jdx,
4055 Register yz_idx1, Register yz_idx2,
4056 Register tmp, Register tmp3, Register tmp4,
4057 Register tmp6, Register product_hi) {
4058
4059 // jlong carry, x[], y[], z[];
4060 // int kdx = ystart+1;
4061 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
4062 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
4063 // jlong carry2 = (jlong)(tmp3 >>> 64);
4064 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2;
4065 // carry = (jlong)(tmp4 >>> 64);
4066 // z[kdx+idx+1] = (jlong)tmp3;
4067 // z[kdx+idx] = (jlong)tmp4;
4068 // }
4069 // idx += 2;
4070 // if (idx > 0) {
4071 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
4072 // z[kdx+idx] = (jlong)yz_idx1;
4073 // carry = (jlong)(yz_idx1 >>> 64);
4074 // }
4075 //
4076
4077 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
4078
4079 lsrw(jdx, idx, 2);
4080
4081 bind(L_third_loop);
4082
4083 subsw(jdx, jdx, 1);
4084 br(Assembler::MI, L_third_loop_exit);
4085 subw(idx, idx, 4);
4086
4087 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4088
4089 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
4090
4091 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4092
4093 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
4094 ror(yz_idx2, yz_idx2, 32);
4095
4096 ldp(rscratch2, rscratch1, Address(tmp6, 0));
4097
4098 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
4099 umulh(tmp4, product_hi, yz_idx1);
4100
4101 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
4102 ror(rscratch2, rscratch2, 32);
4103
4104 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp
4105 umulh(carry2, product_hi, yz_idx2);
4106
4107 // propagate sum of both multiplications into carry:tmp4:tmp3
4108 adds(tmp3, tmp3, carry);
4109 adc(tmp4, tmp4, zr);
4110 adds(tmp3, tmp3, rscratch1);
4111 adcs(tmp4, tmp4, tmp);
4112 adc(carry, carry2, zr);
4113 adds(tmp4, tmp4, rscratch2);
4114 adc(carry, carry, zr);
4115
4116 ror(tmp3, tmp3, 32); // convert little-endian to big-endian
4117 ror(tmp4, tmp4, 32);
4118 stp(tmp4, tmp3, Address(tmp6, 0));
4119
4120 b(L_third_loop);
4121 bind (L_third_loop_exit);
4122
4123 andw (idx, idx, 0x3);
4124 cbz(idx, L_post_third_loop_done);
4125
4126 Label L_check_1;
4127 subsw(idx, idx, 2);
4128 br(Assembler::MI, L_check_1);
4129
4130 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4131 ldr(yz_idx1, Address(rscratch1, 0));
4132 ror(yz_idx1, yz_idx1, 32);
4133 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
4134 umulh(tmp4, product_hi, yz_idx1);
4135 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4136 ldr(yz_idx2, Address(rscratch1, 0));
4137 ror(yz_idx2, yz_idx2, 32);
4138
4139 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
4140
4141 ror(tmp3, tmp3, 32);
4142 str(tmp3, Address(rscratch1, 0));
4143
4144 bind (L_check_1);
4145
4146 andw (idx, idx, 0x1);
4147 subsw(idx, idx, 1);
4148 br(Assembler::MI, L_post_third_loop_done);
4149 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4150 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3
4151 umulh(carry2, tmp4, product_hi);
4152 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4153
4154 add2_with_carry(carry2, tmp3, tmp4, carry);
4155
4156 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4157 extr(carry, carry2, tmp3, 32);
4158
4159 bind(L_post_third_loop_done);
4160 }
4161
4162 /**
4163 * Code for BigInteger::multiplyToLen() intrinsic.
4164 *
4165 * r0: x
4166 * r1: xlen
4167 * r2: y
4168 * r3: ylen
4169 * r4: z
4170 * r5: tmp0
4171 * r10: tmp1
4172 * r11: tmp2
4173 * r12: tmp3
4174 * r13: tmp4
4175 * r14: tmp5
4176 * r15: tmp6
4177 * r16: tmp7
4178 *
4179 */
4180 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
4181 Register z, Register tmp0,
4182 Register tmp1, Register tmp2, Register tmp3, Register tmp4,
4183 Register tmp5, Register tmp6, Register product_hi) {
4184
4185 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);
4186
4187 const Register idx = tmp1;
4188 const Register kdx = tmp2;
4189 const Register xstart = tmp3;
4190
4191 const Register y_idx = tmp4;
4192 const Register carry = tmp5;
4193 const Register product = xlen;
4194 const Register x_xstart = tmp0;
4195
4196 // First Loop.
4197 //
4198 // final static long LONG_MASK = 0xffffffffL;
4199 // int xstart = xlen - 1;
4200 // int ystart = ylen - 1;
4201 // long carry = 0;
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4203 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
4204 // z[kdx] = (int)product;
4205 // carry = product >>> 32;
4206 // }
4207 // z[xstart] = (int)carry;
4208 //
4209
4210 movw(idx, ylen); // idx = ylen;
4211 addw(kdx, xlen, ylen); // kdx = xlen+ylen;
4212 mov(carry, zr); // carry = 0;
4213
4214 Label L_done;
4215
4216 movw(xstart, xlen);
4217 subsw(xstart, xstart, 1);
4218 br(Assembler::MI, L_done);
4219
4220 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
4221
4222 Label L_second_loop;
4223 cbzw(kdx, L_second_loop);
4224
4225 Label L_carry;
4226 subw(kdx, kdx, 1);
4227 cbzw(kdx, L_carry);
4228
4229 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4230 lsr(carry, carry, 32);
4231 subw(kdx, kdx, 1);
4232
4233 bind(L_carry);
4234 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4235
4236 // Second and third (nested) loops.
4237 //
4238 // for (int i = xstart-1; i >= 0; i--) { // Second loop
4239 // carry = 0;
4240 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
4241 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
4242 // (z[k] & LONG_MASK) + carry;
4243 // z[k] = (int)product;
4244 // carry = product >>> 32;
4245 // }
4246 // z[i] = (int)carry;
4247 // }
4248 //
4249 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
4250
4251 const Register jdx = tmp1;
4252
4253 bind(L_second_loop);
4254 mov(carry, zr); // carry = 0;
4255 movw(jdx, ylen); // j = ystart+1
4256
4257 subsw(xstart, xstart, 1); // i = xstart-1;
4258 br(Assembler::MI, L_done);
4259
4260 str(z, Address(pre(sp, -4 * wordSize)));
4261
4262 Label L_last_x;
4263 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
4264 subsw(xstart, xstart, 1); // i = xstart-1;
4265 br(Assembler::MI, L_last_x);
4266
4267 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
4268 ldr(product_hi, Address(rscratch1));
4269 ror(product_hi, product_hi, 32); // convert big-endian to little-endian
4270
4271 Label L_third_loop_prologue;
4272 bind(L_third_loop_prologue);
4273
4274 str(ylen, Address(sp, wordSize));
4275 stp(x, xstart, Address(sp, 2 * wordSize));
4276 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
4277 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
4278 ldp(z, ylen, Address(post(sp, 2 * wordSize)));
4279 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen
4280
4281 addw(tmp3, xlen, 1);
4282 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4283 subsw(tmp3, tmp3, 1);
4284 br(Assembler::MI, L_done);
4285
4286 lsr(carry, carry, 32);
4287 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4288 b(L_second_loop);
4289
  // The next, infrequently executed code is moved outside the loops.
4291 bind(L_last_x);
4292 ldrw(product_hi, Address(x, 0));
4293 b(L_third_loop_prologue);
4294
4295 bind(L_done);
4296 }
4297
4298 // Code for BigInteger::mulAdd intrinsic
4299 // out = r0
4300 // in = r1
4301 // offset = r2 (already out.length-offset)
4302 // len = r3
4303 // k = r4
4304 //
// pseudo code from the Java implementation:
4306 // carry = 0;
4307 // offset = out.length-offset - 1;
4308 // for (int j=len-1; j >= 0; j--) {
4309 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
4310 // out[offset--] = (int)product;
4311 // carry = product >>> 32;
4312 // }
4313 // return (int)carry;
4314 void MacroAssembler::mul_add(Register out, Register in, Register offset,
4315 Register len, Register k) {
4316 Label LOOP, END;
4317 // pre-loop
  cmp(len, zr); // cmp, not cbz/cbnz: reuses the condition twice => fewer branches
4319 csel(out, zr, out, Assembler::EQ);
4320 br(Assembler::EQ, END);
4321 add(in, in, len, LSL, 2); // in[j+1] address
4322 add(offset, out, offset, LSL, 2); // out[offset + 1] address
4323 mov(out, zr); // used to keep carry now
4324 BIND(LOOP);
4325 ldrw(rscratch1, Address(pre(in, -4)));
4326 madd(rscratch1, rscratch1, k, out);
4327 ldrw(rscratch2, Address(pre(offset, -4)));
4328 add(rscratch1, rscratch1, rscratch2);
4329 strw(rscratch1, Address(offset));
4330 lsr(out, rscratch1, 32);
4331 subs(len, len, 1);
4332 br(Assembler::NE, LOOP);
4333 BIND(END);
4334 }
4335
4336 /**
4337 * Emits code to update CRC-32 with a byte value according to constants in table
4338 *
4339 * @param [in,out]crc Register containing the crc.
4340 * @param [in]val Register containing the byte to fold into the CRC.
4341 * @param [in]table Register containing the table of crc constants.
4342 *
4343 * uint32_t crc;
4344 * val = crc_table[(val ^ crc) & 0xFF];
4345 * crc = val ^ (crc >> 8);
4346 *
4347 */
4348 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4349 eor(val, val, crc);
4350 andr(val, val, 0xff);
4351 ldrw(val, Address(table, val, Address::lsl(2)));
4352 eor(crc, val, crc, Assembler::LSR, 8);
4353 }
4354
4355 /**
4356 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
4357 *
4358 * @param [in,out]crc Register containing the crc.
4359 * @param [in]v Register containing the 32-bit to fold into the CRC.
4360 * @param [in]table0 Register containing table 0 of crc constants.
4361 * @param [in]table1 Register containing table 1 of crc constants.
4362 * @param [in]table2 Register containing table 2 of crc constants.
4363 * @param [in]table3 Register containing table 3 of crc constants.
4364 *
4365 * uint32_t crc;
4366 * v = crc ^ v
4367 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
4368 *
4369 */
4370 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
4371 Register table0, Register table1, Register table2, Register table3,
4372 bool upper) {
4373 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
4374 uxtb(tmp, v);
4375 ldrw(crc, Address(table3, tmp, Address::lsl(2)));
4376 ubfx(tmp, v, 8, 8);
4377 ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
4378 eor(crc, crc, tmp);
4379 ubfx(tmp, v, 16, 8);
4380 ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
4381 eor(crc, crc, tmp);
4382 ubfx(tmp, v, 24, 8);
4383 ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
4384 eor(crc, crc, tmp);
4385 }
4386
4387 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
4388 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4389 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4390 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4391
4392 subs(tmp0, len, 384);
4393 mvnw(crc, crc);
4394 br(Assembler::GE, CRC_by128_pre);
4395 BIND(CRC_less128);
4396 subs(len, len, 32);
4397 br(Assembler::GE, CRC_by32_loop);
4398 BIND(CRC_less32);
4399 adds(len, len, 32 - 4);
4400 br(Assembler::GE, CRC_by4_loop);
4401 adds(len, len, 4);
4402 br(Assembler::GT, CRC_by1_loop);
4403 b(L_exit);
4404
4405 BIND(CRC_by32_loop);
4406 ldp(tmp0, tmp1, Address(buf));
4407 crc32x(crc, crc, tmp0);
4408 ldp(tmp2, tmp3, Address(buf, 16));
4409 crc32x(crc, crc, tmp1);
4410 add(buf, buf, 32);
4411 crc32x(crc, crc, tmp2);
4412 subs(len, len, 32);
4413 crc32x(crc, crc, tmp3);
4414 br(Assembler::GE, CRC_by32_loop);
4415 cmn(len, (u1)32);
4416 br(Assembler::NE, CRC_less32);
4417 b(L_exit);
4418
4419 BIND(CRC_by4_loop);
4420 ldrw(tmp0, Address(post(buf, 4)));
4421 subs(len, len, 4);
4422 crc32w(crc, crc, tmp0);
4423 br(Assembler::GE, CRC_by4_loop);
4424 adds(len, len, 4);
4425 br(Assembler::LE, L_exit);
4426 BIND(CRC_by1_loop);
4427 ldrb(tmp0, Address(post(buf, 1)));
4428 subs(len, len, 1);
4429 crc32b(crc, crc, tmp0);
4430 br(Assembler::GT, CRC_by1_loop);
4431 b(L_exit);
4432
4433 BIND(CRC_by128_pre);
4434 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4435 4*256*sizeof(juint) + 8*sizeof(juint));
4436 mov(crc, 0);
4437 crc32x(crc, crc, tmp0);
4438 crc32x(crc, crc, tmp1);
4439
4440 cbnz(len, CRC_less128);
4441
4442 BIND(L_exit);
4443 mvnw(crc, crc);
4444 }
4445
4446 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
4447 Register len, Register tmp0, Register tmp1, Register tmp2,
4448 Register tmp3) {
4449 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4450 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4451
4452 mvnw(crc, crc);
4453
4454 subs(len, len, 128);
4455 br(Assembler::GE, CRC_by64_pre);
4456 BIND(CRC_less64);
4457 adds(len, len, 128-32);
4458 br(Assembler::GE, CRC_by32_loop);
4459 BIND(CRC_less32);
4460 adds(len, len, 32-4);
4461 br(Assembler::GE, CRC_by4_loop);
4462 adds(len, len, 4);
4463 br(Assembler::GT, CRC_by1_loop);
4464 b(L_exit);
4465
4466 BIND(CRC_by32_loop);
4467 ldp(tmp0, tmp1, Address(post(buf, 16)));
4468 subs(len, len, 32);
4469 crc32x(crc, crc, tmp0);
4470 ldr(tmp2, Address(post(buf, 8)));
4471 crc32x(crc, crc, tmp1);
4472 ldr(tmp3, Address(post(buf, 8)));
4473 crc32x(crc, crc, tmp2);
4474 crc32x(crc, crc, tmp3);
4475 br(Assembler::GE, CRC_by32_loop);
4476 cmn(len, (u1)32);
4477 br(Assembler::NE, CRC_less32);
4478 b(L_exit);
4479
4480 BIND(CRC_by4_loop);
4481 ldrw(tmp0, Address(post(buf, 4)));
4482 subs(len, len, 4);
4483 crc32w(crc, crc, tmp0);
4484 br(Assembler::GE, CRC_by4_loop);
4485 adds(len, len, 4);
4486 br(Assembler::LE, L_exit);
4487 BIND(CRC_by1_loop);
4488 ldrb(tmp0, Address(post(buf, 1)));
4489 subs(len, len, 1);
4490 crc32b(crc, crc, tmp0);
4491 br(Assembler::GT, CRC_by1_loop);
4492 b(L_exit);
4493
4494 BIND(CRC_by64_pre);
4495 sub(buf, buf, 8);
4496 ldp(tmp0, tmp1, Address(buf, 8));
4497 crc32x(crc, crc, tmp0);
4498 ldr(tmp2, Address(buf, 24));
4499 crc32x(crc, crc, tmp1);
4500 ldr(tmp3, Address(buf, 32));
4501 crc32x(crc, crc, tmp2);
4502 ldr(tmp0, Address(buf, 40));
4503 crc32x(crc, crc, tmp3);
4504 ldr(tmp1, Address(buf, 48));
4505 crc32x(crc, crc, tmp0);
4506 ldr(tmp2, Address(buf, 56));
4507 crc32x(crc, crc, tmp1);
4508 ldr(tmp3, Address(pre(buf, 64)));
4509
4510 b(CRC_by64_loop);
4511
4512 align(CodeEntryAlignment);
4513 BIND(CRC_by64_loop);
4514 subs(len, len, 64);
4515 crc32x(crc, crc, tmp2);
4516 ldr(tmp0, Address(buf, 8));
4517 crc32x(crc, crc, tmp3);
4518 ldr(tmp1, Address(buf, 16));
4519 crc32x(crc, crc, tmp0);
4520 ldr(tmp2, Address(buf, 24));
4521 crc32x(crc, crc, tmp1);
4522 ldr(tmp3, Address(buf, 32));
4523 crc32x(crc, crc, tmp2);
4524 ldr(tmp0, Address(buf, 40));
4525 crc32x(crc, crc, tmp3);
4526 ldr(tmp1, Address(buf, 48));
4527 crc32x(crc, crc, tmp0);
4528 ldr(tmp2, Address(buf, 56));
4529 crc32x(crc, crc, tmp1);
4530 ldr(tmp3, Address(pre(buf, 64)));
4531 br(Assembler::GE, CRC_by64_loop);
4532
4533 // post-loop
4534 crc32x(crc, crc, tmp2);
4535 crc32x(crc, crc, tmp3);
4536
4537 sub(len, len, 64);
4538 add(buf, buf, 8);
4539 cmn(len, (u1)128);
4540 br(Assembler::NE, CRC_less64);
4541 BIND(L_exit);
4542 mvnw(crc, crc);
4543 }
4544
4545 /**
4546 * @param crc register containing existing CRC (32-bit)
4547 * @param buf register pointing to input byte buffer (byte*)
4548 * @param len register containing number of bytes
 * @param table0..table3 registers that will contain the addresses of the CRC tables
 * @param tmp, tmp2, tmp3 scratch registers
4551 */
4552 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
4553 Register table0, Register table1, Register table2, Register table3,
4554 Register tmp, Register tmp2, Register tmp3) {
4555 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
4556
4557 if (UseCryptoPmullForCRC32) {
4558 kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4559 return;
4560 }
4561
4562 if (UseCRC32) {
4563 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
4564 return;
4565 }
4566
4567 mvnw(crc, crc);
4568
4569 {
4570 uint64_t offset;
4571 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4572 add(table0, table0, offset);
4573 }
4574 add(table1, table0, 1*256*sizeof(juint));
4575 add(table2, table0, 2*256*sizeof(juint));
4576 add(table3, table0, 3*256*sizeof(juint));
4577
4578 { // Neon code start
4579 cmp(len, (u1)64);
4580 br(Assembler::LT, L_by16);
4581 eor(v16, T16B, v16, v16);
4582
4583 Label L_fold;
4584
4585 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
4586
4587 ld1(v0, v1, T2D, post(buf, 32));
4588 ld1r(v4, T2D, post(tmp, 8));
4589 ld1r(v5, T2D, post(tmp, 8));
4590 ld1r(v6, T2D, post(tmp, 8));
4591 ld1r(v7, T2D, post(tmp, 8));
4592 mov(v16, S, 0, crc);
4593
4594 eor(v0, T16B, v0, v16);
4595 sub(len, len, 64);
4596
4597 BIND(L_fold);
4598 pmull(v22, T8H, v0, v5, T8B);
4599 pmull(v20, T8H, v0, v7, T8B);
4600 pmull(v23, T8H, v0, v4, T8B);
4601 pmull(v21, T8H, v0, v6, T8B);
4602
4603 pmull2(v18, T8H, v0, v5, T16B);
4604 pmull2(v16, T8H, v0, v7, T16B);
4605 pmull2(v19, T8H, v0, v4, T16B);
4606 pmull2(v17, T8H, v0, v6, T16B);
4607
4608 uzp1(v24, T8H, v20, v22);
4609 uzp2(v25, T8H, v20, v22);
4610 eor(v20, T16B, v24, v25);
4611
4612 uzp1(v26, T8H, v16, v18);
4613 uzp2(v27, T8H, v16, v18);
4614 eor(v16, T16B, v26, v27);
4615
4616 ushll2(v22, T4S, v20, T8H, 8);
4617 ushll(v20, T4S, v20, T4H, 8);
4618
4619 ushll2(v18, T4S, v16, T8H, 8);
4620 ushll(v16, T4S, v16, T4H, 8);
4621
4622 eor(v22, T16B, v23, v22);
4623 eor(v18, T16B, v19, v18);
4624 eor(v20, T16B, v21, v20);
4625 eor(v16, T16B, v17, v16);
4626
4627 uzp1(v17, T2D, v16, v20);
4628 uzp2(v21, T2D, v16, v20);
4629 eor(v17, T16B, v17, v21);
4630
4631 ushll2(v20, T2D, v17, T4S, 16);
4632 ushll(v16, T2D, v17, T2S, 16);
4633
4634 eor(v20, T16B, v20, v22);
4635 eor(v16, T16B, v16, v18);
4636
4637 uzp1(v17, T2D, v20, v16);
4638 uzp2(v21, T2D, v20, v16);
4639 eor(v28, T16B, v17, v21);
4640
4641 pmull(v22, T8H, v1, v5, T8B);
4642 pmull(v20, T8H, v1, v7, T8B);
4643 pmull(v23, T8H, v1, v4, T8B);
4644 pmull(v21, T8H, v1, v6, T8B);
4645
4646 pmull2(v18, T8H, v1, v5, T16B);
4647 pmull2(v16, T8H, v1, v7, T16B);
4648 pmull2(v19, T8H, v1, v4, T16B);
4649 pmull2(v17, T8H, v1, v6, T16B);
4650
4651 ld1(v0, v1, T2D, post(buf, 32));
4652
4653 uzp1(v24, T8H, v20, v22);
4654 uzp2(v25, T8H, v20, v22);
4655 eor(v20, T16B, v24, v25);
4656
4657 uzp1(v26, T8H, v16, v18);
4658 uzp2(v27, T8H, v16, v18);
4659 eor(v16, T16B, v26, v27);
4660
4661 ushll2(v22, T4S, v20, T8H, 8);
4662 ushll(v20, T4S, v20, T4H, 8);
4663
4664 ushll2(v18, T4S, v16, T8H, 8);
4665 ushll(v16, T4S, v16, T4H, 8);
4666
4667 eor(v22, T16B, v23, v22);
4668 eor(v18, T16B, v19, v18);
4669 eor(v20, T16B, v21, v20);
4670 eor(v16, T16B, v17, v16);
4671
4672 uzp1(v17, T2D, v16, v20);
4673 uzp2(v21, T2D, v16, v20);
4674 eor(v16, T16B, v17, v21);
4675
4676 ushll2(v20, T2D, v16, T4S, 16);
4677 ushll(v16, T2D, v16, T2S, 16);
4678
4679 eor(v20, T16B, v22, v20);
4680 eor(v16, T16B, v16, v18);
4681
4682 uzp1(v17, T2D, v20, v16);
4683 uzp2(v21, T2D, v20, v16);
4684 eor(v20, T16B, v17, v21);
4685
4686 shl(v16, T2D, v28, 1);
4687 shl(v17, T2D, v20, 1);
4688
4689 eor(v0, T16B, v0, v16);
4690 eor(v1, T16B, v1, v17);
4691
4692 subs(len, len, 32);
4693 br(Assembler::GE, L_fold);
4694
4695 mov(crc, 0);
4696 mov(tmp, v0, D, 0);
4697 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4698 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4699 mov(tmp, v0, D, 1);
4700 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4701 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4702 mov(tmp, v1, D, 0);
4703 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4704 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4705 mov(tmp, v1, D, 1);
4706 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4707 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4708
4709 add(len, len, 32);
4710 } // Neon code end
4711
4712 BIND(L_by16);
4713 subs(len, len, 16);
4714 br(Assembler::GE, L_by16_loop);
4715 adds(len, len, 16-4);
4716 br(Assembler::GE, L_by4_loop);
4717 adds(len, len, 4);
4718 br(Assembler::GT, L_by1_loop);
4719 b(L_exit);
4720
4721 BIND(L_by4_loop);
4722 ldrw(tmp, Address(post(buf, 4)));
4723 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
4724 subs(len, len, 4);
4725 br(Assembler::GE, L_by4_loop);
4726 adds(len, len, 4);
4727 br(Assembler::LE, L_exit);
4728 BIND(L_by1_loop);
4729 subs(len, len, 1);
4730 ldrb(tmp, Address(post(buf, 1)));
4731 update_byte_crc32(crc, tmp, table0);
4732 br(Assembler::GT, L_by1_loop);
4733 b(L_exit);
4734
4735 align(CodeEntryAlignment);
4736 BIND(L_by16_loop);
4737 subs(len, len, 16);
4738 ldp(tmp, tmp3, Address(post(buf, 16)));
4739 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4740 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4741 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
4742 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
4743 br(Assembler::GE, L_by16_loop);
4744 adds(len, len, 16-4);
4745 br(Assembler::GE, L_by4_loop);
4746 adds(len, len, 4);
4747 br(Assembler::GT, L_by1_loop);
4748 BIND(L_exit);
4749 mvnw(crc, crc);
4750 }
4751
4752 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
4753 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4754 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4755 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4756
4757 subs(tmp0, len, 384);
4758 br(Assembler::GE, CRC_by128_pre);
4759 BIND(CRC_less128);
4760 subs(len, len, 32);
4761 br(Assembler::GE, CRC_by32_loop);
4762 BIND(CRC_less32);
4763 adds(len, len, 32 - 4);
4764 br(Assembler::GE, CRC_by4_loop);
4765 adds(len, len, 4);
4766 br(Assembler::GT, CRC_by1_loop);
4767 b(L_exit);
4768
4769 BIND(CRC_by32_loop);
4770 ldp(tmp0, tmp1, Address(buf));
4771 crc32cx(crc, crc, tmp0);
4772 ldr(tmp2, Address(buf, 16));
4773 crc32cx(crc, crc, tmp1);
4774 ldr(tmp3, Address(buf, 24));
4775 crc32cx(crc, crc, tmp2);
4776 add(buf, buf, 32);
4777 subs(len, len, 32);
4778 crc32cx(crc, crc, tmp3);
4779 br(Assembler::GE, CRC_by32_loop);
4780 cmn(len, (u1)32);
4781 br(Assembler::NE, CRC_less32);
4782 b(L_exit);
4783
4784 BIND(CRC_by4_loop);
4785 ldrw(tmp0, Address(post(buf, 4)));
4786 subs(len, len, 4);
4787 crc32cw(crc, crc, tmp0);
4788 br(Assembler::GE, CRC_by4_loop);
4789 adds(len, len, 4);
4790 br(Assembler::LE, L_exit);
4791 BIND(CRC_by1_loop);
4792 ldrb(tmp0, Address(post(buf, 1)));
4793 subs(len, len, 1);
4794 crc32cb(crc, crc, tmp0);
4795 br(Assembler::GT, CRC_by1_loop);
4796 b(L_exit);
4797
4798 BIND(CRC_by128_pre);
4799 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4800 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
4801 mov(crc, 0);
4802 crc32cx(crc, crc, tmp0);
4803 crc32cx(crc, crc, tmp1);
4804
4805 cbnz(len, CRC_less128);
4806
4807 BIND(L_exit);
4808 }
4809
4810 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
4811 Register len, Register tmp0, Register tmp1, Register tmp2,
4812 Register tmp3) {
4813 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4814 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4815
4816 subs(len, len, 128);
4817 br(Assembler::GE, CRC_by64_pre);
4818 BIND(CRC_less64);
4819 adds(len, len, 128-32);
4820 br(Assembler::GE, CRC_by32_loop);
4821 BIND(CRC_less32);
4822 adds(len, len, 32-4);
4823 br(Assembler::GE, CRC_by4_loop);
4824 adds(len, len, 4);
4825 br(Assembler::GT, CRC_by1_loop);
4826 b(L_exit);
4827
4828 BIND(CRC_by32_loop);
4829 ldp(tmp0, tmp1, Address(post(buf, 16)));
4830 subs(len, len, 32);
4831 crc32cx(crc, crc, tmp0);
4832 ldr(tmp2, Address(post(buf, 8)));
4833 crc32cx(crc, crc, tmp1);
4834 ldr(tmp3, Address(post(buf, 8)));
4835 crc32cx(crc, crc, tmp2);
4836 crc32cx(crc, crc, tmp3);
4837 br(Assembler::GE, CRC_by32_loop);
4838 cmn(len, (u1)32);
4839 br(Assembler::NE, CRC_less32);
4840 b(L_exit);
4841
4842 BIND(CRC_by4_loop);
4843 ldrw(tmp0, Address(post(buf, 4)));
4844 subs(len, len, 4);
4845 crc32cw(crc, crc, tmp0);
4846 br(Assembler::GE, CRC_by4_loop);
4847 adds(len, len, 4);
4848 br(Assembler::LE, L_exit);
4849 BIND(CRC_by1_loop);
4850 ldrb(tmp0, Address(post(buf, 1)));
4851 subs(len, len, 1);
4852 crc32cb(crc, crc, tmp0);
4853 br(Assembler::GT, CRC_by1_loop);
4854 b(L_exit);
4855
4856 BIND(CRC_by64_pre);
4857 sub(buf, buf, 8);
4858 ldp(tmp0, tmp1, Address(buf, 8));
4859 crc32cx(crc, crc, tmp0);
4860 ldr(tmp2, Address(buf, 24));
4861 crc32cx(crc, crc, tmp1);
4862 ldr(tmp3, Address(buf, 32));
4863 crc32cx(crc, crc, tmp2);
4864 ldr(tmp0, Address(buf, 40));
4865 crc32cx(crc, crc, tmp3);
4866 ldr(tmp1, Address(buf, 48));
4867 crc32cx(crc, crc, tmp0);
4868 ldr(tmp2, Address(buf, 56));
4869 crc32cx(crc, crc, tmp1);
4870 ldr(tmp3, Address(pre(buf, 64)));
4871
4872 b(CRC_by64_loop);
4873
4874 align(CodeEntryAlignment);
4875 BIND(CRC_by64_loop);
4876 subs(len, len, 64);
4877 crc32cx(crc, crc, tmp2);
4878 ldr(tmp0, Address(buf, 8));
4879 crc32cx(crc, crc, tmp3);
4880 ldr(tmp1, Address(buf, 16));
4881 crc32cx(crc, crc, tmp0);
4882 ldr(tmp2, Address(buf, 24));
4883 crc32cx(crc, crc, tmp1);
4884 ldr(tmp3, Address(buf, 32));
4885 crc32cx(crc, crc, tmp2);
4886 ldr(tmp0, Address(buf, 40));
4887 crc32cx(crc, crc, tmp3);
4888 ldr(tmp1, Address(buf, 48));
4889 crc32cx(crc, crc, tmp0);
4890 ldr(tmp2, Address(buf, 56));
4891 crc32cx(crc, crc, tmp1);
4892 ldr(tmp3, Address(pre(buf, 64)));
4893 br(Assembler::GE, CRC_by64_loop);
4894
4895 // post-loop
4896 crc32cx(crc, crc, tmp2);
4897 crc32cx(crc, crc, tmp3);
4898
4899 sub(len, len, 64);
4900 add(buf, buf, 8);
4901 cmn(len, (u1)128);
4902 br(Assembler::NE, CRC_less64);
4903 BIND(L_exit);
4904 }
4905
4906 /**
4907 * @param crc register containing existing CRC (32-bit)
4908 * @param buf register pointing to input byte buffer (byte*)
4909 * @param len register containing number of bytes
 * @param table0..table3 registers that will contain the addresses of the CRC tables
 * @param tmp, tmp2, tmp3 scratch registers
4912 */
4913 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
4914 Register table0, Register table1, Register table2, Register table3,
4915 Register tmp, Register tmp2, Register tmp3) {
4916 if (UseCryptoPmullForCRC32) {
4917 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4918 } else {
4919 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
4920 }
4921 }
4922
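// Background for the PMULL-based implementation below (informal): over GF(2),
// a CRC is the remainder M(x) * x^32 mod P(x). For a message split as A || B,
// where B is n bits long,
//
//   CRC(A || B) = CRC( (A(x) * x^n mod P(x)) xor B )
//
// so a precomputed constant K = x^n mod P(x) lets one "fold" a leading block
// into the following block with a single carry-less multiply. The loop below
// keeps eight independent 128-bit lanes in flight (v0..v7), folding 128 bytes
// per iteration with PMULL/PMULL2, and only reduces to a scalar CRC at the
// very end (the crc32cx instructions in the callers).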
void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
  Label CRC_by128_loop;
  assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

  sub(len, len, 256);
  Register table = tmp0;
  {
    uint64_t offset;
    adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    add(table, table, offset);
  }
  add(table, table, table_offset);

  // Registers v0..v7 are used as data registers.
  // Registers v16..v31 are used as tmp registers.
  sub(buf, buf, 0x10);
  ldrq(v0, Address(buf, 0x10));
  ldrq(v1, Address(buf, 0x20));
  ldrq(v2, Address(buf, 0x30));
  ldrq(v3, Address(buf, 0x40));
  ldrq(v4, Address(buf, 0x50));
  ldrq(v5, Address(buf, 0x60));
  ldrq(v6, Address(buf, 0x70));
  ldrq(v7, Address(pre(buf, 0x80)));

  movi(v31, T4S, 0);
  mov(v31, S, 0, crc);
  eor(v0, T16B, v0, v31);

  // Register v16 contains constants from the crc table.
  ldrq(v16, Address(table));
  b(CRC_by128_loop);

  align(OptoLoopAlignment);
  BIND(CRC_by128_loop);
  pmull (v17, T1Q, v0, v16, T1D);
  pmull2(v18, T1Q, v0, v16, T2D);
  ldrq(v0, Address(buf, 0x10));
  eor3(v0, T16B, v17, v18, v0);

  pmull (v19, T1Q, v1, v16, T1D);
  pmull2(v20, T1Q, v1, v16, T2D);
  ldrq(v1, Address(buf, 0x20));
  eor3(v1, T16B, v19, v20, v1);

  pmull (v21, T1Q, v2, v16, T1D);
  pmull2(v22, T1Q, v2, v16, T2D);
  ldrq(v2, Address(buf, 0x30));
  eor3(v2, T16B, v21, v22, v2);

  pmull (v23, T1Q, v3, v16, T1D);
  pmull2(v24, T1Q, v3, v16, T2D);
  ldrq(v3, Address(buf, 0x40));
  eor3(v3, T16B, v23, v24, v3);

  pmull (v25, T1Q, v4, v16, T1D);
  pmull2(v26, T1Q, v4, v16, T2D);
  ldrq(v4, Address(buf, 0x50));
  eor3(v4, T16B, v25, v26, v4);

  pmull (v27, T1Q, v5, v16, T1D);
  pmull2(v28, T1Q, v5, v16, T2D);
  ldrq(v5, Address(buf, 0x60));
  eor3(v5, T16B, v27, v28, v5);

  pmull (v29, T1Q, v6, v16, T1D);
  pmull2(v30, T1Q, v6, v16, T2D);
  ldrq(v6, Address(buf, 0x70));
  eor3(v6, T16B, v29, v30, v6);

  // Reuse registers v23, v24.
  // Using them won't block the first instruction of the next iteration.
  pmull (v23, T1Q, v7, v16, T1D);
  pmull2(v24, T1Q, v7, v16, T2D);
  ldrq(v7, Address(pre(buf, 0x80)));
  eor3(v7, T16B, v23, v24, v7);

  subs(len, len, 0x80);
  br(Assembler::GE, CRC_by128_loop);

  // fold into 512 bits
  // Use v31 for constants because v16 may still be in use.
  ldrq(v31, Address(table, 0x10));

  pmull (v17, T1Q, v0, v31, T1D);
  pmull2(v18, T1Q, v0, v31, T2D);
  eor3(v0, T16B, v17, v18, v4);

  pmull (v19, T1Q, v1, v31, T1D);
  pmull2(v20, T1Q, v1, v31, T2D);
  eor3(v1, T16B, v19, v20, v5);

  pmull (v21, T1Q, v2, v31, T1D);
  pmull2(v22, T1Q, v2, v31, T2D);
  eor3(v2, T16B, v21, v22, v6);

  pmull (v23, T1Q, v3, v31, T1D);
  pmull2(v24, T1Q, v3, v31, T2D);
  eor3(v3, T16B, v23, v24, v7);

  // fold into 128 bits
  // Use v17 for constants because v31 may still be in use.
  ldrq(v17, Address(table, 0x20));
  pmull (v25, T1Q, v0, v17, T1D);
  pmull2(v26, T1Q, v0, v17, T2D);
  eor3(v3, T16B, v3, v25, v26);

  // Use v18 for constants because v17 may still be in use.
  ldrq(v18, Address(table, 0x30));
  pmull (v27, T1Q, v1, v18, T1D);
  pmull2(v28, T1Q, v1, v18, T2D);
  eor3(v3, T16B, v3, v27, v28);

  // Use v19 for constants because v18 may still be in use.
  ldrq(v19, Address(table, 0x40));
  pmull (v29, T1Q, v2, v19, T1D);
  pmull2(v30, T1Q, v2, v19, T2D);
  eor3(v0, T16B, v3, v29, v30);

  add(len, len, 0x80);
  add(buf, buf, 0x10);

  mov(tmp0, v0, D, 0);
  mov(tmp1, v0, D, 1);
}

void MacroAssembler::addptr(const Address &dst, int32_t src) {
  Address adr;
  switch(dst.getMode()) {
    case Address::base_plus_offset:
      // This is the expected mode, although we allow all the other
      // forms below.
      adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
      break;
    default:
      lea(rscratch2, dst);
      adr = Address(rscratch2);
      break;
  }
  ldr(rscratch1, adr);
  add(rscratch1, rscratch1, src);
  str(rscratch1, adr);
}

void MacroAssembler::cmpptr(Register src1, Address src2) {
  uint64_t offset;
  adrp(rscratch1, src2, offset);
  ldr(rscratch1, Address(rscratch1, offset));
  cmp(src1, rscratch1);
}

void MacroAssembler::cmpoop(Register obj1, Register obj2) {
  cmp(obj1, obj2);
}

void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}

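// The pointer chase performed by load_method_holder below, sketched with the
// runtime's C++ accessors (illustrative only):
//
//   InstanceKlass* holder = method->constMethod()->constants()->pool_holder();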
void MacroAssembler::load_method_holder(Register holder, Register method) {
  ldr(holder, Address(method, Method::const_offset()));             // ConstMethod*
  ldr(holder, Address(holder, ConstMethod::constants_offset()));    // ConstantPool*
  ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}

// Loads the obj's narrow Klass* into dst.
// Preserves all registers (incl src, rscratch1 and rscratch2).
// Input:
//   src - the oop we want to load the klass from.
//   dst - output narrow klass.
void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
  assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
  ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
  lsr(dst, dst, markWord::klass_shift);
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(dst, src);
  } else {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
  decode_klass_not_null(dst);
}

void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
  if (RestoreMXCSROnJNICalls) {
    Label OK;
    get_fpcr(tmp1);
    mov(tmp2, tmp1);
    // Set FPCR to the state we need. We do want Round to Nearest. We
    // don't want non-IEEE rounding modes or floating-point traps.
    bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
    bfi(tmp1, zr, 8, 5);  // Clear exception-control bits (8-12)
    bfi(tmp1, zr, 0, 2);  // Clear AH:FIZ
    eor(tmp2, tmp1, tmp2);
    cbz(tmp2, OK);        // Only reset FPCR if it's wrong
    set_fpcr(tmp1);
    bind(OK);
  }
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
  // OopHandle::resolve is an indirection.
  access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
}

// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
  assert_different_registers(result, tmp1, tmp2);
  Label resolved;

  // A null weak handle resolves to null.
  cbz(result, resolved);

  // Only 64 bit platforms support GCs that require a tmp register.
  // WeakHandle::resolve is an indirection like jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(method, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp, obj);
  } else {
    ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  if (CompressedKlassPointers::base() == nullptr) {
    cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
    return;
  } else if (!AOTCodeCache::is_on_for_dump() &&
             ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 &&
             CompressedKlassPointers::shift() == 0) {
    // Only the bottom 32 bits matter
    cmpw(klass, tmp);
    return;
  }
  decode_klass_not_null(tmp);
  cmp(klass, tmp);
}

void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, obj1);
    load_narrow_klass_compact(tmp2, obj2);
  } else {
    ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
  }
  cmpw(tmp1, tmp2);
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release? Concurrent GCs assume the
  // length is valid once the klass field is non-null.
  assert(!UseCompactObjectHeaders, "not with compact headers");
  encode_klass_not_null(src);
  strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  assert(!UseCompactObjectHeaders, "not with compact headers");
  // Store to klass gap in destination
  strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}

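// A minimal C-style sketch of the encoding the routines below implement
// (hypothetical helper; base/shift stand for CompressedOops::base()/shift()):
//
//   uint32_t encode_sketch(uintptr_t oop, uintptr_t base, int shift) {
//     if (base == 0) return (uint32_t)(oop >> shift);
//     // Branchless null handling: a null oop is below the heap base, so
//     // subs/csel yields 0 for it and (oop - base) otherwise.
//     uintptr_t d = (oop >= base) ? (oop - base) : 0;
//     return (uint32_t)(d >> shift);
//   }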
// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(s, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /*  Old algorithm: is this any worse?
    Label nonnull;
    cbnz(r, nonnull);
    sub(r, r, rheapbase);
    bind(nonnull);
    lsr(r, r, LogMinObjAlignmentInBytes);
    */
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(r, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
  if (CompressedOops::base() != nullptr) {
    sub(r, r, rheapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(r, r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(src, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");

  Register data = src;
  if (CompressedOops::base() != nullptr) {
    sub(dst, src, rheapbase);
    data = dst;
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(dst, data, LogMinObjAlignmentInBytes);
    data = dst;
  }
  if (data == src)
    mov(dst, src);
}

void MacroAssembler::decode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      lsl(d, s, CompressedOops::shift());
    } else if (d != s) {
      mov(d, s);
    }
  } else {
    Label done;
    if (d != s)
      mov(d, s);
    cbz(s, done);
    add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
    bind(done);
  }
  verify_oop_msg(d, "broken oop in decode_heap_oop");
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
    if (dst != src) {
      mov(dst, src);
    }
  }
}

MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
  assert(Metaspace::initialized(), "metaspace not initialized yet");
  assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
  return _klass_decode_mode;
}

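// How the three modes decode a narrow klass nk (informal sketch; base and
// shift are the CompressedKlassPointers values):
//
//   KlassDecodeZero: Klass* = (uintptr_t)nk << shift            (base == 0)
//   KlassDecodeXor:  Klass* = ((uintptr_t)nk << shift) ^ base
//       Valid because base is a logical immediate whose set bits are disjoint
//       from the shifted-klass range, so for any x in that range
//       base ^ x == base | x == base + x.
//   KlassDecodeMovk: Klass* = (((base >> shift) | nk) << shift)
//       Valid because the shifted base has nonzero bits only in bits 47:32,
//       so a single movk(dst, ..., 32) materializes them on top of nk.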
MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
  // KlassDecodeMode shouldn't be set already.
  assert(_klass_decode_mode == KlassDecodeNone, "set once");

  if (base == nullptr) {
    return KlassDecodeZero;
  }

  if (operand_valid_for_logical_immediate(
        /*is32*/false, (uint64_t)base)) {
    const uint64_t range_mask = right_n_bits(log2i_ceil(range));
    if (((uint64_t)base & range_mask) == 0) {
      return KlassDecodeXor;
    }
  }

  const uint64_t shifted_base =
    (uint64_t)base >> shift;
  if ((shifted_base & 0xffff0000ffffffff) == 0) {
    return KlassDecodeMovk;
  }

  // No valid encoding.
  return KlassDecodeNone;
}

// Check if one of the above decoding modes will work for given base, shift and range.
bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
  return klass_decode_mode(base, shift, range) != KlassDecodeNone;
}

bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
  _klass_decode_mode = klass_decode_mode(base, shift, range);
  return _klass_decode_mode != KlassDecodeNone;
}

static Register pick_different_tmp(Register dst, Register src) {
  auto tmps = RegSet::of(r0, r1, r2) - RegSet::of(src, dst);
  return *tmps.begin();
}

void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) {
  // we have to load the klass base from the AOT constants area but
  // not the shift because it is not allowed to change
  int shift = CompressedKlassPointers::shift();
  assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
  if (dst != src) {
    // we can load the base into dst, subtract it from the src and shift down
    lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(dst, dst);
    sub(dst, src, dst);
    lsr(dst, dst, shift);
  } else {
    // we need an extra register in order to load the klass base
    Register tmp = pick_different_tmp(dst, src);
    RegSet regs = RegSet::of(tmp);
    push(regs, sp);
    lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(tmp, tmp);
    sub(dst, src, tmp);
    lsr(dst, dst, shift);
    pop(regs, sp);
  }
}

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
    encode_klass_not_null_for_aot(dst, src);
    return;
  }

  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsr(dst, src, CompressedKlassPointers::shift());
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
      lsr(dst, dst, CompressedKlassPointers::shift());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk:
    if (CompressedKlassPointers::shift() != 0) {
      ubfx(dst, src, CompressedKlassPointers::shift(), 32);
    } else {
      movw(dst, src);
    }
    break;

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::encode_klass_not_null(Register r) {
  encode_klass_not_null(r, r);
}

void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) {
  // we have to load the klass base from the AOT constants area but
  // not the shift because it is not allowed to change
  int shift = CompressedKlassPointers::shift();
  assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
  if (dst != src) {
    // we can load the base into dst then add the offset with a suitable shift
    lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(dst, dst);
    add(dst, dst, src, LSL, shift);
  } else {
    // we need an extra register in order to load the klass base
    Register tmp = pick_different_tmp(dst, src);
    RegSet regs = RegSet::of(tmp);
    push(regs, sp);
    lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(tmp, tmp);
    add(dst, tmp, src, LSL, shift);
    pop(regs, sp);
  }
}

void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  if (AOTCodeCache::is_on_for_dump()) {
    decode_klass_not_null_for_aot(dst, src);
    return;
  }

  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, CompressedKlassPointers::shift());
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, CompressedKlassPointers::shift());
      eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk: {
    const uint64_t shifted_base =
      (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();

    if (dst != src) movw(dst, src);
    movk(dst, shifted_base >> 32, 32);

    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, dst, CompressedKlassPointers::shift());
    }

    break;
  }

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::decode_klass_not_null(Register r) {
  decode_klass_not_null(r, r);
}

void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert (UseCompressedOops, "should only be used for compressed oops");
    assert (Universe::heap() != nullptr, "java heap should be initialized");
    assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  InstructionMark im(this);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  code_section()->relocate(inst_mark(), rspec);
  // 0xDEADBEEF is a placeholder; the real narrow oop is patched in later
  // via the relocation recorded above.
  movz(dst, 0xDEAD, 16);
  movk(dst, 0xBEEF);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int index = oop_recorder()->find_index(k);

  InstructionMark im(this);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  code_section()->relocate(inst_mark(), rspec);
  narrowKlass nk = CompressedKlassPointers::encode(k);
  movz(dst, (nk >> 16), 16);
  movk(dst, nk & 0xffff);
}

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                    Register dst, Address src,
                                    Register tmp1, Register tmp2) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators, type);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
  } else {
    bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
  }
}

void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
                                     Address dst, Register val,
                                     Register tmp1, Register tmp2, Register tmp3) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators, type);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
  } else {
    bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                   Register tmp2, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
}

void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                                            Register tmp2, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
}

void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
                                    Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
}

// Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) {
  access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}

Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return Address((address)obj, rspec);
}

// Move an oop into a register.
void MacroAssembler::movoop(Register dst, jobject obj) {
  int oop_index;
  if (obj == nullptr) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromUnknown tiv;
      assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
    }
#endif
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
    mov(dst, Address((address)obj, rspec));
  } else {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr(dst, Address(dummy, rspec));
  }
}

// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj == nullptr) {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = metadata_Relocation::spec(oop_index);
  mov(dst, Address((address)obj, rspec));
}

Address MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return Address((address)obj, oop_Relocation::spec(oop_index));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;

    stp(rscratch2, rscratch1, Address(pre(sp, -16)));

    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}

// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  mov(rscratch1, (int)os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -(int)os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
    // This could be any sized move, but since it can serve as a debugging
    // crumb, the bigger the better.
    lea(tmp, Address(tmp, -(int)os::vm_page_size()));
    str(size, Address(tmp));
  }
}

// Move the address of the polling page into dest.
void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
  ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
}

// Read the polling page. The address of the polling page must
// already be in r.
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  address mark;
  {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), rtype);
    ldrw(zr, Address(r, 0));
    mark = inst_mark();
  }
  verify_cross_modify_fence_not_required();
  return mark;
}

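// Reminder: adrp has a signed 21-bit page immediate, so it reaches +/-2^20
// 4K pages (+/-4GB) from the current pc. The range check below therefore
// tests the distance from the target page to *both* ends of the code cache;
// if both deltas fit, the instruction stays valid wherever the code is later
// relocated within the cache. For example, with a 128MB code cache the two
// deltas differ by at most 128MB / 4KB = 32768 pages, well within budget.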
void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
  uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
  uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
  uint64_t dest_page = (uint64_t)dest.target() >> 12;
  int64_t offset_low = dest_page - low_page;
  int64_t offset_high = dest_page - high_page;

  assert(is_valid_AArch64_address(dest.target()), "bad address");
  assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");

  InstructionMark im(this);
  code_section()->relocate(inst_mark(), dest.rspec());
  // 8143067: Ensure that the adrp can reach the dest from anywhere within
  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
#if INCLUDE_CDS
  if (AOTCodeCache::is_on_for_dump()) {
    address byte_map_base_adr = AOTRuntimeConstants::card_table_base_address();
    lea(reg, ExternalAddress(byte_map_base_adr));
    ldr(reg, Address(reg));
    return;
  }
#endif
  CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();

  // Strictly speaking the card table base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)ctbs->card_table_base_const());
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_dump()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

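// Frame shape produced by build_frame (sp grows downwards):
//
//   old sp --> +----------------+
//              | saved rfp, lr  |  <- at [new sp, framesize - 2*wordSize]
//              | locals/spills  |
//   new sp --> +----------------+
//
// The (1 << 9) + 2*wordSize threshold reflects stp's scaled signed 7-bit
// offset, which for 64-bit registers spans [-512, +504] bytes: small frames
// can allocate first and store rfp/lr with one offset stp, while larger
// frames must save rfp/lr at the old sp and then drop sp separately.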
void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
  authenticate_return_address();
}

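// Semantics of the method below, as a C sketch (hypothetical helper). Note
// that on early exit the assembly may report a count rounded down to the
// enclosing word, i.e. any value not past the first negative byte, which
// callers tolerate:
//
//   int count_positives_sketch(const int8_t *a, int len) {
//     int i = 0;
//     while (i < len && a[i] >= 0) i++;  // stop at first byte with bit 7 set
//     return i;                          // == len when all bytes are positive
//   }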
// This method counts leading positive bytes (highest bit not set) in the
// provided byte array.
address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
  // Simple and most common case of aligned small array which is not at the
  // end of memory page is placed here. All other cases are in stub.
  Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
  const uint64_t UPPER_BIT_MASK = 0x8080808080808080;
  assert_different_registers(ary1, len, result);

  mov(result, len);
  cmpw(len, 0);
  br(LE, DONE);
  cmpw(len, 4 * wordSize);
  br(GE, STUB_LONG); // size >= 32: go to the long stub

  int shift = 64 - exact_log2(os::vm_page_size());
  lsl(rscratch1, ary1, shift);
  mov(rscratch2, (size_t)(4 * wordSize) << shift);
  adds(rscratch2, rscratch1, rscratch2);  // At end of page?
  br(CS, STUB); // at the end of the page: go to the stub
  subs(len, len, wordSize);
  br(LT, END);

  BIND(LOOP);
  ldr(rscratch1, Address(post(ary1, wordSize)));
  tst(rscratch1, UPPER_BIT_MASK);
  br(NE, SET_RESULT);
  subs(len, len, wordSize);
  br(GE, LOOP);
  cmpw(len, -wordSize);
  br(EQ, DONE);

  BIND(END);
  ldr(rscratch1, Address(ary1));
  sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
  lslv(rscratch1, rscratch1, rscratch2);
  tst(rscratch1, UPPER_BIT_MASK);
  br(NE, SET_RESULT);
  b(DONE);

  BIND(STUB);
  RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
  assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
  address tpc1 = trampoline_call(count_pos);
  if (tpc1 == nullptr) {
    DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
    postcond(pc() == badAddress);
    return nullptr;
  }
  b(DONE);

  BIND(STUB_LONG);
  RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
  assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
  address tpc2 = trampoline_call(count_pos_long);
  if (tpc2 == nullptr) {
    DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
    postcond(pc() == badAddress);
    return nullptr;
  }
  b(DONE);

  BIND(SET_RESULT);

  add(len, len, wordSize);
  sub(result, result, len);

  BIND(DONE);
  postcond(pc() != badAddress);
  return pc();
}

// Clobbers: rscratch1, rscratch2, rflags
// May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
                                      Register tmp4, Register tmp5, Register result,
                                      Register cnt1, int elem_size) {
  Label DONE, SAME;
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  int elem_per_word = wordSize/elem_size;
  int log_elem_size = exact_log2(elem_size);
  int klass_offset  = arrayOopDesc::klass_offset_in_bytes();
  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset
      = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
  // When the length offset is not aligned to 8 bytes, we align it down.
  // This is valid because the lowered offset then points at the klass
  // field, which holds the same value for type arrays of the same kind.
  int start_offset = align_down(length_offset, BytesPerWord);
  int extra_length = base_offset - start_offset;
  assert(start_offset == length_offset || start_offset == klass_offset,
         "start offset must be 8-byte-aligned or be the klass offset");
  assert(base_offset != start_offset, "must include the length field");
  extra_length = extra_length / elem_size; // We count in elements, not bytes.
  int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);

  assert(elem_size == 1 || elem_size == 2, "must be char or byte");
  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);

#ifndef PRODUCT
  {
    const char kind = (elem_size == 2) ? 'U' : 'L';
    char comment[64];
    os::snprintf_checked(comment, sizeof comment, "array_equals%c{", kind);
    BLOCK_COMMENT(comment);
  }
#endif

  // if (a1 == a2)
  //   return true;
  cmpoop(a1, a2); // May have read barriers for a1 and a2.
  br(EQ, SAME);

  if (UseSimpleArrayEquals) {
    Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
    // if (a1 == nullptr || a2 == nullptr)
    //   return false;
    // (a1 & a2) == 0 means that at least one pointer is null (or, very
    // rarely, that two non-null pointers have no bits in common), so we
    // can save one branch in the common case.
    tst(a1, a2);
    mov(result, false);
    br(EQ, A_MIGHT_BE_NULL);
    // if (a1.length != a2.length)
    //   return false;
    bind(A_IS_NOT_NULL);
    ldrw(cnt1, Address(a1, length_offset));
    ldrw(tmp5, Address(a2, length_offset));
    cmp(cnt1, tmp5);
    br(NE, DONE); // If lengths differ, return false
    // Increase loop counter by diff between base- and actual start-offset.
    addw(cnt1, cnt1, extra_length);
    lea(a1, Address(a1, start_offset));
    lea(a2, Address(a2, start_offset));
    // Check for short strings, i.e. smaller than wordSize.
    subs(cnt1, cnt1, elem_per_word);
    br(Assembler::LT, SHORT);
    // Main 8 byte comparison loop.
    bind(NEXT_WORD); {
      ldr(tmp1, Address(post(a1, wordSize)));
      ldr(tmp2, Address(post(a2, wordSize)));
      subs(cnt1, cnt1, elem_per_word);
      eor(tmp5, tmp1, tmp2);
      cbnz(tmp5, DONE);
    } br(GT, NEXT_WORD);
    // Last longword. In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.
    // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
    // length == 4.
    if (log_elem_size > 0)
      lsl(cnt1, cnt1, log_elem_size);
    ldr(tmp3, Address(a1, cnt1));
    ldr(tmp4, Address(a2, cnt1));
    eor(tmp5, tmp3, tmp4);
    cbnz(tmp5, DONE);
    b(SAME);
    bind(A_MIGHT_BE_NULL);
    // in case both a1 and a2 are not-null, proceed with loads
    cbz(a1, DONE);
    cbz(a2, DONE);
    b(A_IS_NOT_NULL);
    bind(SHORT);

    tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
    {
      ldrw(tmp1, Address(post(a1, 4)));
      ldrw(tmp2, Address(post(a2, 4)));
      eorw(tmp5, tmp1, tmp2);
      cbnzw(tmp5, DONE);
    }
    bind(TAIL03);
    tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
    {
      ldrh(tmp3, Address(post(a1, 2)));
      ldrh(tmp4, Address(post(a2, 2)));
      eorw(tmp5, tmp3, tmp4);
      cbnzw(tmp5, DONE);
    }
    bind(TAIL01);
    if (elem_size == 1) { // Only needed when comparing byte arrays.
      tbz(cnt1, 0, SAME); // 0-1 bytes left.
      {
        ldrb(tmp1, a1);
        ldrb(tmp2, a2);
        eorw(tmp5, tmp1, tmp2);
        cbnzw(tmp5, DONE);
      }
    }
  } else {
    Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
        CSET_EQ, LAST_CHECK;
    mov(result, false);
    cbz(a1, DONE);
    ldrw(cnt1, Address(a1, length_offset));
    cbz(a2, DONE);
    ldrw(tmp5, Address(a2, length_offset));
    cmp(cnt1, tmp5);
    br(NE, DONE); // If lengths differ, return false
    // Increase loop counter by diff between base- and actual start-offset.
    addw(cnt1, cnt1, extra_length);

    // on most CPUs a2 is still "locked" (surprisingly) after the ldrw, so it's
    // faster to perform another branch before comparing a1 and a2
    cmp(cnt1, (u1)elem_per_word);
    br(LE, SHORT); // short or same
    ldr(tmp3, Address(pre(a1, start_offset)));
    subs(zr, cnt1, stubBytesThreshold);
    br(GE, STUB);
    ldr(tmp4, Address(pre(a2, start_offset)));
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);

    // Main 16 byte comparison loop with 2 exits
    bind(NEXT_DWORD); {
      ldr(tmp1, Address(pre(a1, wordSize)));
      ldr(tmp2, Address(pre(a2, wordSize)));
      subs(cnt1, cnt1, 2 * elem_per_word);
      br(LE, TAIL);
      eor(tmp4, tmp3, tmp4);
      cbnz(tmp4, DONE);
      ldr(tmp3, Address(pre(a1, wordSize)));
      ldr(tmp4, Address(pre(a2, wordSize)));
      cmp(cnt1, (u1)elem_per_word);
      br(LE, TAIL2);
      cmp(tmp1, tmp2);
    } br(EQ, NEXT_DWORD);
    b(DONE);

    bind(TAIL);
    eor(tmp4, tmp3, tmp4);
    eor(tmp2, tmp1, tmp2);
    lslv(tmp2, tmp2, tmp5);
    orr(tmp5, tmp4, tmp2);
    cmp(tmp5, zr);
    b(CSET_EQ);

    bind(TAIL2);
    eor(tmp2, tmp1, tmp2);
    cbnz(tmp2, DONE);
    b(LAST_CHECK);

    bind(STUB);
    ldr(tmp4, Address(pre(a2, start_offset)));
    if (elem_size == 2) { // convert to byte counter
      lsl(cnt1, cnt1, 1);
    }
    eor(tmp5, tmp3, tmp4);
    cbnz(tmp5, DONE);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
    assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(DONE);

    // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
    // so, if a2 == null => return false(0), else return true, so we can return a2
    mov(result, a2);
    b(DONE);
    bind(SHORT);
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
    ldr(tmp3, Address(a1, start_offset));
    ldr(tmp4, Address(a2, start_offset));
    bind(LAST_CHECK);
    eor(tmp4, tmp3, tmp4);
    lslv(tmp5, tmp4, tmp5);
    cmp(tmp5, zr);
    bind(CSET_EQ);
    cset(result, EQ);
    b(DONE);
  }

  bind(SAME);
  mov(result, true);
  // That's it.
  bind(DONE);

  BLOCK_COMMENT("} array_equals");
  postcond(pc() != badAddress);
  return pc();
}

// Compare Strings

// For Strings we're passed the address of the first characters in a1
// and a2 and the length in cnt1.
// There are two implementations. For arrays >= 8 bytes, all
// comparisons (including the final one, which may overlap) are
// performed 8 bytes at a time. For strings < 8 bytes, we compare a
// halfword, then a short, and then a byte.

void MacroAssembler::string_equals(Register a1, Register a2,
                                   Register result, Register cnt1)
{
  Label SAME, DONE, SHORT, NEXT_WORD;
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;

  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);

#ifndef PRODUCT
  {
    char comment[64];
    os::snprintf_checked(comment, sizeof comment, "{string_equalsL");
    BLOCK_COMMENT(comment);
  }
#endif

  mov(result, false);

  // Check for short strings, i.e. smaller than wordSize.
  subs(cnt1, cnt1, wordSize);
  br(Assembler::LT, SHORT);
  // Main 8 byte comparison loop.
  bind(NEXT_WORD); {
    ldr(tmp1, Address(post(a1, wordSize)));
    ldr(tmp2, Address(post(a2, wordSize)));
    subs(cnt1, cnt1, wordSize);
    eor(tmp1, tmp1, tmp2);
    cbnz(tmp1, DONE);
  } br(GT, NEXT_WORD);
  // Last longword. In the case where length == 4 we compare the
  // same longword twice, but that's still faster than another
  // conditional branch.
  // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
  // length == 4.
  ldr(tmp1, Address(a1, cnt1));
  ldr(tmp2, Address(a2, cnt1));
  eor(tmp2, tmp1, tmp2);
  cbnz(tmp2, DONE);
  b(SAME);

  bind(SHORT);
  Label TAIL03, TAIL01;

  tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
  {
    ldrw(tmp1, Address(post(a1, 4)));
    ldrw(tmp2, Address(post(a2, 4)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL03);
  tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
  {
    ldrh(tmp1, Address(post(a1, 2)));
    ldrh(tmp2, Address(post(a2, 2)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL01);
  tbz(cnt1, 0, SAME); // 0-1 bytes left.
  {
    ldrb(tmp1, a1);
    ldrb(tmp2, a2);
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  // Arrays are equal.
  bind(SAME);
  mov(result, true);

  // That's it.
  bind(DONE);
  BLOCK_COMMENT("} string_equals");
}


// The size of the blocks erased by the zero_blocks stub. We must
// handle anything smaller than this ourselves in zero_words().
const int MacroAssembler::zero_words_block_size = 8;

// zero_words() is used by C2 ClearArray patterns and by
// C1_MacroAssembler. It is as small as possible, handling small word
// counts locally and delegating anything larger to the zero_blocks
// stub. It is expanded many times in compiled code, so it is
// important to keep it short.

// ptr: Address of a buffer to be zeroed.
// cnt: Count in HeapWords.
//
// ptr, cnt, rscratch1, and rscratch2 are clobbered.
address MacroAssembler::zero_words(Register ptr, Register cnt)
{
  assert(is_power_of_2(zero_words_block_size), "adjust this");

  BLOCK_COMMENT("zero_words {");
  assert(ptr == r10 && cnt == r11, "mismatch in register usage");

  subs(rscratch1, cnt, zero_words_block_size);
  Label around;
  br(LO, around);
  {
    RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
    assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
    // Make sure this is a C2 compilation. C1 allocates space only for
    // trampoline stubs generated by Call LIR ops, and in any case it
    // makes sense for a C1 compilation task to proceed as quickly as
    // possible.
    CompileTask* task;
    if (StubRoutines::aarch64::complete()
        && Thread::current()->is_Compiler_thread()
        && (task = ciEnv::current()->task())
        && is_c2_compile(task->comp_level())) {
      address tpc = trampoline_call(zero_blocks);
      if (tpc == nullptr) {
        DEBUG_ONLY(reset_labels(around));
        return nullptr;
      }
    } else {
      far_call(zero_blocks);
    }
  }
  bind(around);

  // We have a few words left to do. zero_blocks has adjusted r10 and r11
  // for us.
  for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
    Label l;
    tbz(cnt, exact_log2(i), l);
    for (int j = 0; j < i; j += 2) {
      stp(zr, zr, post(ptr, 2 * BytesPerWord));
    }
    bind(l);
  }
  {
    Label l;
    tbz(cnt, 0, l);
    str(zr, Address(ptr));
    bind(l);
  }

  BLOCK_COMMENT("} zero_words");
  return pc();
}

// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Immediate count in HeapWords.
//
// r10, r11, rscratch1, and rscratch2 are clobbered.
address MacroAssembler::zero_words(Register base, uint64_t cnt)
{
  assert(wordSize <= BlockZeroingLowLimit,
         "increase BlockZeroingLowLimit");
  address result = nullptr;
  if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
#ifndef PRODUCT
    {
      char buf[64];
      os::snprintf_checked(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
      BLOCK_COMMENT(buf);
    }
#endif
    if (cnt >= 16) {
      uint64_t loops = cnt/16;
      if (loops > 1) {
        mov(rscratch2, loops - 1);
      }
      {
        Label loop;
        bind(loop);
        for (int i = 0; i < 16; i += 2) {
          stp(zr, zr, Address(base, i * BytesPerWord));
        }
        add(base, base, 16 * BytesPerWord);
        if (loops > 1) {
          subs(rscratch2, rscratch2, 1);
          br(GE, loop);
        }
      }
    }
    cnt %= 16;
    int i = cnt & 1;  // store any odd word to start
    if (i) str(zr, Address(base));
    for (; i < (int)cnt; i += 2) {
      stp(zr, zr, Address(base, i * wordSize));
    }
    BLOCK_COMMENT("} zero_words");
    result = pc();
  } else {
    mov(r10, base); mov(r11, cnt);
    result = zero_words(r10, r11);
  }
  return result;
}

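// Notes on the alignment trick in zero_dcache_blocks below: DC ZVA zeroes a
// whole "zero granule" of VM_Version::zva_length() bytes (commonly 64) per
// instruction, but requires an aligned base. To reach alignment the code
// branches into a table of stp instructions: each stp covers 16 bytes of
// data and occupies 4 bytes of code, so for t bytes of alignment slack the
// computed target is initial_table_end - t/4 (the LSR 2 below).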
// Zero blocks of memory by using DC ZVA.
//
// Aligns the base address first sufficiently for DC ZVA, then uses
// DC ZVA repeatedly for every full block. cnt is the size to be
// zeroed in HeapWords. Returns the count of words left to be zeroed
// in cnt.
//
// NOTE: This is intended to be used in the zero_blocks() stub. If
// you want to use it elsewhere, note that cnt must be >= 2*zva_length.
void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
  Register tmp = rscratch1;
  Register tmp2 = rscratch2;
  int zva_length = VM_Version::zva_length();
  Label initial_table_end, loop_zva;
  Label fini;

  // Base must be 16 byte aligned. If not just return and let caller handle it
  tst(base, 0x0f);
  br(Assembler::NE, fini);
  // Align base with ZVA length.
  neg(tmp, base);
  andr(tmp, tmp, zva_length - 1);

  // tmp: the number of bytes to be filled to align the base with ZVA length.
  add(base, base, tmp);
  sub(cnt, cnt, tmp, Assembler::ASR, 3);
  adr(tmp2, initial_table_end);
  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
  br(tmp2);

  for (int i = -zva_length + 16; i < 0; i += 16)
    stp(zr, zr, Address(base, i));
  bind(initial_table_end);

  sub(cnt, cnt, zva_length >> 3);
  bind(loop_zva);
  dc(Assembler::ZVA, base);
  subs(cnt, cnt, zva_length >> 3);
  add(base, base, zva_length);
  br(Assembler::GE, loop_zva);
  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
  bind(fini);
}

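// The switch in the pseudocode inside fill_words below is realized as a
// Duff's-device-style computed branch: rscratch1 holds (cnt & 14) words;
// each unrolled stp stores two words and occupies 4 bytes of code, so the
// entry point is 'entry' minus rscratch1 * 2 code bytes (the LSL 1 below).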
// base: Address of a buffer to be filled, 8 bytes aligned.
// cnt: Count in 8-byte unit.
// value: Value to be filled with.
// base will point to the end of the buffer after filling.
void MacroAssembler::fill_words(Register base, Register cnt, Register value)
{
//  Algorithm:
//
//    if (cnt == 0) {
//      return;
//    }
//    if ((p & 8) != 0) {
//      *p++ = v;
//    }
//
//    scratch1 = cnt & 14;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1 / 2) {
//      do {
//        cnt -= 16;
//          p[-16] = v;
//          p[-15] = v;
//        case 7:
//          p[-14] = v;
//          p[-13] = v;
//        case 6:
//          p[-12] = v;
//          p[-11] = v;
//          // ...
//        case 1:
//          p[-2] = v;
//          p[-1] = v;
//        case 0:
//          p += 16;
//      } while (cnt);
//    }
//    if ((cnt & 1) == 1) {
//      *p++ = v;
//    }

  assert_different_registers(base, cnt, value, rscratch1, rscratch2);

  Label fini, skip, entry, loop;
  const int unroll = 8; // Number of stp instructions we'll unroll

  cbz(cnt, fini);
  tbz(base, 3, skip);
  str(value, Address(post(base, 8)));
  sub(cnt, cnt, 1);
  bind(skip);

  andr(rscratch1, cnt, (unroll-1) * 2);
  sub(cnt, cnt, rscratch1);
  add(base, base, rscratch1, Assembler::LSL, 3);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
  br(rscratch2);

  bind(loop);
  add(base, base, unroll * 16);
  for (int i = -unroll; i < 0; i++)
    stp(value, value, Address(base, i * 16));
  bind(entry);
  subs(cnt, cnt, unroll * 2);
  br(Assembler::GE, loop);

  tbz(cnt, 0, fini);
  str(value, Address(post(base, 8)));
  bind(fini);
}

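// Scalar semantics of encode_iso_array below, as a C sketch (hypothetical
// helper; the SIMD code is an unrolled version of this, 32 and then 8 chars
// at a time, and the final scalar loop matches it directly):
//
//   int encode_iso_sketch(const uint16_t *src, uint8_t *dst, int len,
//                         int ascii) {
//     uint16_t limit = ascii ? 0x80 : 0x100;
//     int i = 0;
//     for (; i < len && src[i] < limit; i++)
//       dst[i] = (uint8_t)src[i];
//     return i;  // == len on full success, else index of first bad char
//   }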
6446 // Intrinsic for
6447 //
6448 // - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6449 // Encodes char[] to byte[] in ISO-8859-1
6450 //
6451 // - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6452 // Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
6453 //
6454 // - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6455 // Encodes char[] to byte[] in ASCII
6456 //
6457 // This version always returns the number of characters copied, and does not
6458 // clobber the 'len' register. A successful copy will complete with the post-
6459 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
6460 // post-condition: 0 <= 'res' < 'len'.
6461 //
6462 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) has proven to
6463 // degrade performance (on Ampere Altra - Neoverse N1), to an extent
6464 // beyond the acceptable, even though the footprint would be smaller.
6465 // Using 'umaxv' in the ASCII-case comes with a small penalty but does
6466 // avoid additional bloat.
6467 //
6468 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags
6469 void MacroAssembler::encode_iso_array(Register src, Register dst,
6470 Register len, Register res, bool ascii,
6471 FloatRegister vtmp0, FloatRegister vtmp1,
6472 FloatRegister vtmp2, FloatRegister vtmp3,
6473 FloatRegister vtmp4, FloatRegister vtmp5)
6474 {
6475 Register cnt = res;
6476 Register max = rscratch1;
6477 Register chk = rscratch2;
6478
6479 prfm(Address(src), PLDL1STRM);
6480 movw(cnt, len);
6481
6482 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
6483
6484 Label LOOP_32, DONE_32, FAIL_32;
6485
6486 BIND(LOOP_32);
6487 {
6488 cmpw(cnt, 32);
6489 br(LT, DONE_32);
6490 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
6491 // Extract lower bytes.
6492 FloatRegister vlo0 = vtmp4;
6493 FloatRegister vlo1 = vtmp5;
6494 uzp1(vlo0, T16B, vtmp0, vtmp1);
6495 uzp1(vlo1, T16B, vtmp2, vtmp3);
6496 // Merge bits...
6497 orr(vtmp0, T16B, vtmp0, vtmp1);
6498 orr(vtmp2, T16B, vtmp2, vtmp3);
6499 // Extract merged upper bytes.
6500 FloatRegister vhix = vtmp0;
6501 uzp2(vhix, T16B, vtmp0, vtmp2);
6502 // ISO-check on hi-parts (all zero).
6503 // ASCII-check on lo-parts (no sign).
6504 FloatRegister vlox = vtmp1; // Merge lower bytes.
6505 ASCII(orr(vlox, T16B, vlo0, vlo1));
6506 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox));
6507 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox));
6508 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0));
6509 ASCII(orr(chk, chk, max));
6510 cbnz(chk, FAIL_32);
6511 subw(cnt, cnt, 32);
6512 st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
6513 b(LOOP_32);
6514 }
6515 BIND(FAIL_32);
6516 sub(src, src, 64);
6517 BIND(DONE_32);
6518
6519 Label LOOP_8, SKIP_8;
6520
6521 BIND(LOOP_8);
6522 {
6523 cmpw(cnt, 8);
6524 br(LT, SKIP_8);
6525 FloatRegister vhi = vtmp0;
6526 FloatRegister vlo = vtmp1;
6527 ld1(vtmp3, T8H, src);
6528 uzp1(vlo, T16B, vtmp3, vtmp3);
6529 uzp2(vhi, T16B, vtmp3, vtmp3);
6530 // ISO-check on hi-parts (all zero).
6531 // ASCII-check on lo-parts (no sign).
6532 ASCII(cm(LT, vtmp2, T16B, vlo));
6533 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2));
6534 ASCII(umov(max, vtmp2, B, 0));
6535 ASCII(orr(chk, chk, max));
6536 cbnz(chk, SKIP_8);
6537
6538 strd(vlo, Address(post(dst, 8)));
6539 subw(cnt, cnt, 8);
6540 add(src, src, 16);
6541 b(LOOP_8);
6542 }
6543 BIND(SKIP_8);
6544
6545 #undef ASCII

  Label LOOP, DONE;

  cbz(cnt, DONE);
  BIND(LOOP);
  {
    Register chr = rscratch1;
    ldrh(chr, Address(post(src, 2)));
    tst(chr, ascii ? 0xff80 : 0xff00);
    br(NE, DONE);
    strb(chr, Address(post(dst, 1)));
    subs(cnt, cnt, 1);
    br(GT, LOOP);
  }
  BIND(DONE);
  // Return index where we stopped.
  subw(res, len, cnt);
}

// Inflate byte[] array to char[].
// Clobbers: src, dst, len, rflags, rscratch1, v0-v6
address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
                                           FloatRegister vtmp1, FloatRegister vtmp2,
                                           FloatRegister vtmp3, Register tmp4) {
  Label big, done, after_init, to_stub;

  assert_different_registers(src, dst, len, tmp4, rscratch1);

  fmovd(vtmp1, 0.0);
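  // vtmp1 is now all-zero. Inflation works by interleaving source bytes
  // with these zeros: e.g. zip1(v, T8B, src, zero) turns the bytes
  // 61 62 63 64 into 61 00 62 00 63 00 64 00, which is the little-endian
  // UTF-16 encoding of "abcd".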
  lsrw(tmp4, len, 3);
  bind(after_init);
  cbnzw(tmp4, big);
  // Short string: less than 8 bytes.
  {
    Label loop, tiny;

    cmpw(len, 4);
    br(LT, tiny);
    // Use SIMD to do 4 bytes.
    ldrs(vtmp2, post(src, 4));
    zip1(vtmp3, T8B, vtmp2, vtmp1);
    subw(len, len, 4);
    strd(vtmp3, post(dst, 8));

    cbzw(len, done);

    // Do the remaining bytes one at a time.
    bind(loop);
    ldrb(tmp4, post(src, 1));
    strh(tmp4, post(dst, 2));
    subw(len, len, 1);

    bind(tiny);
    cbnz(len, loop);

    b(done);
  }

  if (SoftwarePrefetchHintDistance >= 0) {
    bind(to_stub);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
    assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(big, done));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(after_init);
  }

  // Unpack the bytes 8 at a time.
  bind(big);
  {
    Label loop, around, loop_last, loop_start;

    if (SoftwarePrefetchHintDistance >= 0) {
      const int large_loop_threshold = (64 + 16)/8;
      ldrd(vtmp2, post(src, 8));
      andw(len, len, 7);
      cmp(tmp4, (u1)large_loop_threshold);
      br(GE, to_stub);
      b(loop_start);

      bind(loop);
      ldrd(vtmp2, post(src, 8));
      bind(loop_start);
      subs(tmp4, tmp4, 1);
      br(EQ, loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      ldrd(vtmp3, post(src, 8));
      st1(vtmp2, T8H, post(dst, 16));
      subs(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp3, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      br(NE, loop);
      b(around);
      bind(loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      st1(vtmp2, T8H, post(dst, 16));
      bind(around);
      cbz(len, done);
    } else {
      andw(len, len, 7);
      bind(loop);
      ldrd(vtmp2, post(src, 8));
      sub(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp2, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      cbnz(tmp4, loop);
    }
  }

  // Do the tail of up to 8 bytes.
  add(src, src, len);
  ldrd(vtmp3, Address(src, -8));
  add(dst, dst, len, ext::uxtw, 1);
  zip1(vtmp3, T16B, vtmp3, vtmp1);
  strq(vtmp3, Address(dst, -16));

  bind(done);
  postcond(pc() != badAddress);
  return pc();
}

// Compress char[] array to byte[].
// Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Returns, in 'res', the array length if every element in the array can be
// encoded; otherwise, the index of the first non-latin1 (> 0xff) character.
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         Register res,
                                         FloatRegister tmp0, FloatRegister tmp1,
                                         FloatRegister tmp2, FloatRegister tmp3,
                                         FloatRegister tmp4, FloatRegister tmp5) {
  encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
}

// java.lang.Math.round(double a)
// Returns the closest long to the argument, with ties rounding to
// positive infinity. This requires some fiddling for corner
// cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
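// For example, Math.round(-2.5) must be -2, yet fcvtas (ties away from zero)
// would produce -3; and the naive (jlong)(a + 0.5) fails for
// a == 0.49999999999999994, where the addition rounds up to exactly 1.0
// although the correct result is 0.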
void MacroAssembler::java_round_double(Register dst, FloatRegister src,
                                       FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_double: { ");
  fmovd(rscratch1, src);
  // Use RoundToNearestTiesAway unless src is small and negative.
  fcvtasd(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p52
  eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
  mov(rscratch2, julong_cast(0x1.0p52));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && abs(src) < 0x1.0p52
    // src may have a fractional part, so add 0.5
    fmovd(ftmp, 0.5);
    faddd(ftmp, src, ftmp);
    // Convert double to jlong, use RoundTowardsNegative
    fcvtmsd(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_double");
}

void MacroAssembler::java_round_float(Register dst, FloatRegister src,
                                      FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_float: { ");
  fmovs(rscratch1, src);
  // Use RoundToNearestTiesAway unless src is small and negative.
  fcvtassw(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p23
  eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
  mov(rscratch2, jint_cast(0x1.0p23f));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && abs(src) < 0x1.0p23
    // src may have a fractional part, so add 0.5
    fmovs(ftmp, 0.5f);
    fadds(ftmp, src, ftmp);
    // Convert float to jint, use RoundTowardsNegative
    fcvtmssw(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_float");
}

// get_thread() can be called anywhere inside generated code so we
// need to save whatever non-callee save context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is an ordinary C function.
//
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs =
    LINUX_ONLY(RegSet::range(r0, r1)  + lr - dst)
    NOT_LINUX (RegSet::range(r0, r17) + lr - dst);

  protect_return_address();
  push(saved_regs, sp);

  mov(lr, ExternalAddress(CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)));
  blr(lr);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
  authenticate_return_address();
}

void MacroAssembler::cache_wb(Address line) {
  assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
  assert(line.index() == noreg, "index should be noreg");
  assert(line.offset() == 0, "offset should be 0");
  // We would also like to assert this:
  // assert(line._ext.shift == 0, "shift should be zero");
  if (VM_Version::supports_dcpop()) {
    // writeback using clean virtual address to point of persistence
    dc(Assembler::CVAP, line.base());
  } else {
    // no need to generate anything as Unsafe.writebackMemory should
    // never invoke this stub
  }
}

void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier post-sync
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}

void MacroAssembler::verify_sve_vector_length(Register tmp) {
  if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) {
    return;
  }
  // Make sure that native code does not change SVE vector length.
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);
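  // tmp now holds the current vector length in bytes, since INCB adds the
  // number of byte-sized elements per SVE vector to the zeroed register.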
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get the count of true elements
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}

void MacroAssembler::safepoint_isb() {
  isb();
#ifndef PRODUCT
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
#endif
}

#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
#endif

void MacroAssembler::spin_wait() {
  block_comment("spin_wait {");
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
      case SpinWait::NOP:
        nop();
        break;
      case SpinWait::ISB:
        isb();
        break;
      case SpinWait::YIELD:
        yield();
        break;
      case SpinWait::SB:
        assert(VM_Version::supports_sb(), "current CPU does not support SB instruction");
        sb();
        break;
      case SpinWait::WFET:
        spin_wait_wfet(VM_Version::spin_wait_desc().delay());
        break;
      default:
        ShouldNotReachHere();
    }
  }
  block_comment("}");
}

void MacroAssembler::spin_wait_wfet(int delay_ns) {
  // The sequence assumes CNTFRQ_EL0 is fixed at 1GHz. The assumption is valid
  // starting from Armv8.6, per section "D12.1.2 The system counter" of the
  // Arm Architecture Reference Manual for A-profile architecture version M.a.a.
  // This is sufficient because FEAT_WFxT, which provides wfet, is only
  // introduced in Armv8.7.
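  // For example, with delay_ns == 1000 the loop below waits until
  // CNTVCTSS_EL0 has advanced 1000 ticks past its starting value, i.e.
  // roughly 1us at the assumed 1GHz counter frequency.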
  Register target = rscratch1;
  Register current = rscratch2;
  get_cntvctss_el0(current);
  add(target, current, delay_ns);

  Label L_wait_loop;
  bind(L_wait_loop);

  wfet(target);
  get_cntvctss_el0(current);

  cmp(current, target);
  br(LT, L_wait_loop);

  sb();
}

// Stack frame creation/removal

void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.
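//
// Typical pairing (see enter()/leave() above): protect_return_address() runs
// before the LR is stored by stp(rfp, lr, ...), and
// authenticate_return_address() runs after the matching ldp, so the LR only
// ever reaches memory in signed form.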

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return address held in the given register. Use before updating the LR in the
// existing stack frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    paciza(return_reg);
  }
}

// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address() {
  if (VM_Version::use_rop_protection()) {
    autiaz();
    check_return_address();
  }
}

// Authenticate the return address held in the given register. Use before updating the LR in
// the existing stack frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autiza(return_reg);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
//
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

#ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used - i.e. when the program returns to the invalid LR. At this point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
#endif

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the placement of the saved rfp and lr) the slots must be biased by
// the following value.
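// For example, an incoming argument in stack slot 0 is read from rfp + 16:
// the '+ 4' in reg2offset_in() below skips the two saved words (rfp and lr),
// i.e. four 4-byte stack slots.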
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (AArch64 ABI) even though Java only stores 32 bits for the parameter.
// On 32-bit it would simply be 32 bits, so this routine does 32->32 on
// 32-bit and 32->64 on 64-bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. We must pass a handle, not the oop itself.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if the oop is null; if it is, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop handles, and pass a handle if the oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If the arg is on the stack then place it; otherwise it is already in the correct register.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg: may have to be moved via an integer register (for stack-to-stack copies)
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// Implements fast-locking.
//
// - basic_lock: the address of the BasicObjectLock; used here only to clear
//               its object-monitor cache when UseObjectMonitorTable is set
// - obj: the object to be locked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds or we need to take the slow-path.
    str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
  }

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(t1, obj);
    ldrb(t1, Address(t1, Klass::misc_flags_offset()));
    tst(t1, KlassFlags::_misc_is_value_based_class);
    br(Assembler::NE, slow);
  }

  // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
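  // orr sets the unlocked bit so that 'mark' is the expected (unlocked, 0b01)
  // header; eor then clears that bit again so that 't' is the locked (0b00)
  // header.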
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements fast-unlocking.
//
// - obj: the object to be unlocked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("fast_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}