1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "ci/ciEnv.hpp"
29 #include "code/compiledIC.hpp"
30 #include "compiler/compileTask.hpp"
31 #include "compiler/disassembler.hpp"
32 #include "compiler/oopMap.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/cardTableBarrierSet.hpp"
36 #include "gc/shared/cardTable.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39 #include "interpreter/bytecodeHistogram.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/interpreterRuntime.hpp"
42 #include "jvm.h"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "nativeInst_aarch64.hpp"
46 #include "oops/accessDecorators.hpp"
47 #include "oops/compressedKlass.inline.hpp"
48 #include "oops/compressedOops.inline.hpp"
49 #include "oops/klass.inline.hpp"
50 #include "runtime/continuation.hpp"
51 #include "runtime/icache.hpp"
52 #include "runtime/interfaceSupport.inline.hpp"
53 #include "runtime/javaThread.hpp"
54 #include "runtime/jniHandles.inline.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/stubRoutines.hpp"
57 #include "utilities/globalDefinitions.hpp"
58 #include "utilities/powerOfTwo.hpp"
59 #ifdef COMPILER1
60 #include "c1/c1_LIRAssembler.hpp"
61 #endif
62 #ifdef COMPILER2
63 #include "oops/oop.hpp"
64 #include "opto/compile.hpp"
65 #include "opto/node.hpp"
66 #include "opto/output.hpp"
67 #endif
68
69 #include <sys/types.h>
70
71 #ifdef PRODUCT
72 #define BLOCK_COMMENT(str) /* nothing */
73 #else
74 #define BLOCK_COMMENT(str) block_comment(str)
75 #endif
76 #define STOP(str) stop(str);
77 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
78
79 #ifdef ASSERT
80 extern "C" void disnm(intptr_t p);
81 #endif
82 // Target-dependent relocation processing
83 //
84 // Instruction sequences whose target may need to be retrieved or
85 // patched are distinguished by their leading instruction, sorting
86 // them into three main instruction groups and related subgroups.
87 //
88 // 1) Branch, Exception and System (insn count = 1)
89 // 1a) Unconditional branch (immediate):
90 // b/bl imm19
91 // 1b) Compare & branch (immediate):
92 // cbz/cbnz Rt imm19
93 // 1c) Test & branch (immediate):
94 // tbz/tbnz Rt imm14
95 // 1d) Conditional branch (immediate):
96 // b.cond imm19
97 //
98 // 2) Loads and Stores (insn count = 1)
99 // 2a) Load register literal:
100 // ldr Rt imm19
101 //
102 // 3) Data Processing Immediate (insn count = 2 or 3)
103 // 3a) PC-rel. addressing
104 // adr/adrp Rx imm21; ldr/str Ry Rx #imm12
105 // adr/adrp Rx imm21; add Ry Rx #imm12
106 // adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
107 // adr/adrp Rx imm21
108 // adr/adrp Rx imm21; movk Rx #imm16<<32
109 // adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
110 // The latter form can only happen when the target is an
111 // ExternalAddress, and (by definition) ExternalAddresses don't
112 // move. Because of that property, there is never any need to
113 // patch the last of the three instructions. However,
114 // MacroAssembler::target_addr_for_insn takes all three
115 // instructions into account and returns the correct address.
116 // 3b) Move wide (immediate)
117 // movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
118 //
119 // A switch on a subset of the instruction's bits provides an
120 // efficient dispatch to these subcases.
121 //
122 // insn[28:26] -> main group ('x' == don't care)
123 // 00x -> UNALLOCATED
124 // 100 -> Data Processing Immediate
125 // 101 -> Branch, Exception and System
126 // x1x -> Loads and Stores
127 //
128 // insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
129 // n.b. in some cases extra bits need to be checked to verify the
130 // instruction is as expected
131 //
132 // 1) ... xx101x Branch, Exception and System
133 // 1a) 00___x Unconditional branch (immediate)
134 // 1b) 01___0 Compare & branch (immediate)
135 // 1c) 01___1 Test & branch (immediate)
136 // 1d) 10___0 Conditional branch (immediate)
137 // other Should not happen
138 //
139 // 2) ... xxx1x0 Loads and Stores
140 // 2a) xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
141 // 2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
142 // strictly should be 64 bit non-FP/SIMD i.e.
143 // 0101_000 (i.e. requires insn[31:24] == 01011000)
144 //
145 // 3) ... xx100x Data Processing Immediate
146 // 3a) xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
147 // 3b) xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
148 // strictly should be 64 bit movz #imm16<<0
149 // 110___10100 (i.e. requires insn[31:21] == 11010010100)
150 //
151
152 static uint32_t insn_at(address insn_addr, int n) {
153 return ((uint32_t*)insn_addr)[n];
154 }
155
156 template<typename T>
157 class RelocActions : public AllStatic {
158
159 public:
160
161 static int ALWAYSINLINE run(address insn_addr, address &target) {
162 int instructions = 1;
163 uint32_t insn = insn_at(insn_addr, 0);
164
165 uint32_t dispatch = Instruction_aarch64::extract(insn, 30, 25);
166 switch(dispatch) {
167 case 0b001010:
168 case 0b001011: {
169 instructions = T::unconditionalBranch(insn_addr, target);
170 break;
171 }
172 case 0b101010: // Conditional branch (immediate)
173 case 0b011010: { // Compare & branch (immediate)
174 instructions = T::conditionalBranch(insn_addr, target);
175 break;
176 }
177 case 0b011011: {
178 instructions = T::testAndBranch(insn_addr, target);
179 break;
180 }
181 case 0b001100:
182 case 0b001110:
183 case 0b011100:
184 case 0b011110:
185 case 0b101100:
186 case 0b101110:
187 case 0b111100:
188 case 0b111110: {
189 // load/store
190 if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
191 // Load register (literal)
192 instructions = T::loadStore(insn_addr, target);
193 break;
194 } else {
195 // nothing to do
196 assert(target == nullptr, "did not expect to relocate target for polling page load");
197 }
198 break;
199 }
200 case 0b001000:
201 case 0b011000:
202 case 0b101000:
203 case 0b111000: {
204 // adr/adrp
205 assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
206 int shift = Instruction_aarch64::extract(insn, 31, 31);
207 if (shift) {
208 uint32_t insn2 = insn_at(insn_addr, 1);
209 if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
210 Instruction_aarch64::extract(insn, 4, 0) ==
211 Instruction_aarch64::extract(insn2, 9, 5)) {
212 instructions = T::adrp(insn_addr, target, T::adrpMem);
213 } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
214 Instruction_aarch64::extract(insn, 4, 0) ==
215 Instruction_aarch64::extract(insn2, 4, 0)) {
216 instructions = T::adrp(insn_addr, target, T::adrpAdd);
217 } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
218 Instruction_aarch64::extract(insn, 4, 0) ==
219 Instruction_aarch64::extract(insn2, 4, 0)) {
220 instructions = T::adrp(insn_addr, target, T::adrpMovk);
221 } else {
222 ShouldNotReachHere();
223 }
224 } else {
225 instructions = T::adr(insn_addr, target);
226 }
227 break;
228 }
229 case 0b001001:
230 case 0b011001:
231 case 0b101001:
232 case 0b111001: {
233 instructions = T::immediate(insn_addr, target);
234 break;
235 }
236 default: {
237 ShouldNotReachHere();
238 }
239 }
240
241 T::verify(insn_addr, target);
242 return instructions * NativeInstruction::instruction_size;
243 }
244 };
245
246 class Patcher : public AllStatic {
247 public:
248 static int unconditionalBranch(address insn_addr, address &target) {
249 intptr_t offset = (target - insn_addr) >> 2;
250 Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
251 return 1;
252 }
253 static int conditionalBranch(address insn_addr, address &target) {
254 intptr_t offset = (target - insn_addr) >> 2;
255 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
256 return 1;
257 }
258 static int testAndBranch(address insn_addr, address &target) {
259 intptr_t offset = (target - insn_addr) >> 2;
260 Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
261 return 1;
262 }
263 static int loadStore(address insn_addr, address &target) {
264 intptr_t offset = (target - insn_addr) >> 2;
265 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
266 return 1;
267 }
268 static int adr(address insn_addr, address &target) {
269 #ifdef ASSERT
270 assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
271 #endif
272 // PC-rel. addressing
273 ptrdiff_t offset = target - insn_addr;
274 int offset_lo = offset & 3;
275 offset >>= 2;
276 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
277 Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
278 return 1;
279 }
280 template<typename U>
281 static int adrp(address insn_addr, address &target, U inner) {
282 int instructions = 1;
283 #ifdef ASSERT
284 assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
285 #endif
286 ptrdiff_t offset = target - insn_addr;
287 instructions = 2;
288 precond(inner != nullptr);
289 // Give the inner reloc a chance to modify the target.
290 address adjusted_target = target;
291 instructions = inner(insn_addr, adjusted_target);
292 uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
293 uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
294 offset = adr_page - pc_page;
295 int offset_lo = offset & 3;
296 offset >>= 2;
297 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
298 Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
299 return instructions;
300 }
301 static int adrpMem(address insn_addr, address &target) {
302 uintptr_t dest = (uintptr_t)target;
303 int offset_lo = dest & 0xfff;
304 uint32_t insn2 = insn_at(insn_addr, 1);
305 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
306 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
307 guarantee(((dest >> size) << size) == dest, "misaligned target");
308 return 2;
309 }
310 static int adrpAdd(address insn_addr, address &target) {
311 uintptr_t dest = (uintptr_t)target;
312 int offset_lo = dest & 0xfff;
313 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
314 return 2;
315 }
316 static int adrpMovk(address insn_addr, address &target) {
317 uintptr_t dest = uintptr_t(target);
318 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
319 dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
320 target = address(dest);
321 return 2;
322 }
323 static int immediate(address insn_addr, address &target) {
324 assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
325 uint64_t dest = (uint64_t)target;
326 // Move wide constant
327 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
328 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
329 Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
330 Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
331 Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
332 return 3;
333 }
334 static void verify(address insn_addr, address &target) {
335 #ifdef ASSERT
336 address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
337 if (!(address_is == target)) {
338 tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
339 disnm((intptr_t)insn_addr);
340 assert(address_is == target, "should be");
341 }
342 #endif
343 }
344 };
345
346 // If insn1 and insn2 use the same register to form an address, either
347 // by an offsetted LDR or a simple ADD, return the offset. If the
348 // second instruction is an LDR, the offset may be scaled.
349 static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
350 if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
351 Instruction_aarch64::extract(insn1, 4, 0) ==
352 Instruction_aarch64::extract(insn2, 9, 5)) {
353 // Load/store register (unsigned immediate)
354 byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
355 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
356 byte_offset <<= size;
357 return true;
358 } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
359 Instruction_aarch64::extract(insn1, 4, 0) ==
360 Instruction_aarch64::extract(insn2, 4, 0)) {
361 // add (immediate)
362 byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
363 return true;
364 }
365 return false;
366 }
367
368 class AArch64Decoder : public AllStatic {
369 public:
370
371 static int loadStore(address insn_addr, address &target) {
372 intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
373 target = insn_addr + (offset << 2);
374 return 1;
375 }
376 static int unconditionalBranch(address insn_addr, address &target) {
377 intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 25, 0);
378 target = insn_addr + (offset << 2);
379 return 1;
380 }
381 static int conditionalBranch(address insn_addr, address &target) {
382 intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
383 target = address(((uint64_t)insn_addr + (offset << 2)));
384 return 1;
385 }
386 static int testAndBranch(address insn_addr, address &target) {
387 intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 18, 5);
388 target = address(((uint64_t)insn_addr + (offset << 2)));
389 return 1;
390 }
391 static int adr(address insn_addr, address &target) {
392 // PC-rel. addressing
393 uint32_t insn = insn_at(insn_addr, 0);
394 intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
395 offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
396 target = address((uint64_t)insn_addr + offset);
397 return 1;
398 }
399 template<typename U>
400 static int adrp(address insn_addr, address &target, U inner) {
401 uint32_t insn = insn_at(insn_addr, 0);
402 assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
403 intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
404 offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
405 int shift = 12;
406 offset <<= shift;
407 uint64_t target_page = ((uint64_t)insn_addr) + offset;
408 target_page &= ((uint64_t)-1) << shift;
409 target = address(target_page);
410 precond(inner != nullptr);
411 inner(insn_addr, target);
412 return 2;
413 }
414 static int adrpMem(address insn_addr, address &target) {
415 uint32_t insn2 = insn_at(insn_addr, 1);
416 // Load/store register (unsigned immediate)
417 ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
418 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
419 byte_offset <<= size;
420 target += byte_offset;
421 return 2;
422 }
423 static int adrpAdd(address insn_addr, address &target) {
424 uint32_t insn2 = insn_at(insn_addr, 1);
425 // add (immediate)
426 ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
427 target += byte_offset;
428 return 2;
429 }
430 static int adrpMovk(address insn_addr, address &target) {
431 uint32_t insn2 = insn_at(insn_addr, 1);
432 uint64_t dest = uint64_t(target);
433 dest = (dest & 0xffff0000ffffffff) |
434 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
435 target = address(dest);
436
437 // We know the destination 4k page. Maybe we have a third
438 // instruction.
439 uint32_t insn = insn_at(insn_addr, 0);
440 uint32_t insn3 = insn_at(insn_addr, 2);
441 ptrdiff_t byte_offset;
442 if (offset_for(insn, insn3, byte_offset)) {
443 target += byte_offset;
444 return 3;
445 } else {
446 return 2;
447 }
448 }
449 static int immediate(address insn_addr, address &target) {
450 uint32_t *insns = (uint32_t *)insn_addr;
451 assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
452 // Move wide constant: movz, movk, movk. See movptr().
453 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
454 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
455 target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
456 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
457 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
458 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
459 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
460 return 3;
461 }
462 static void verify(address insn_addr, address &target) {
463 }
464 };
465
466 address MacroAssembler::target_addr_for_insn(address insn_addr) {
467 address target;
468 RelocActions<AArch64Decoder>::run(insn_addr, target);
469 return target;
470 }
471
472 // Patch any kind of instruction; there may be several instructions.
473 // Return the total length (in bytes) of the instructions.
474 int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
475 MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
476 return RelocActions<Patcher>::run(insn_addr, target);
477 }
478
479 int MacroAssembler::patch_oop(address insn_addr, address o) {
480 int instructions;
481 unsigned insn = *(unsigned*)insn_addr;
482 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
483
484 MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
485
486 // OOPs are either narrow (32 bits) or wide (48 bits). We encode
487 // narrow OOPs by setting the upper 16 bits in the first
488 // instruction.
489 if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
490 // Move narrow OOP
491 uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
492 Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
493 Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
494 instructions = 2;
495 } else {
496 // Move wide OOP
497 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
498 uintptr_t dest = (uintptr_t)o;
499 Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
500 Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
501 Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
502 instructions = 3;
503 }
504 return instructions * NativeInstruction::instruction_size;
505 }
506
507 int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
508 // Metadata pointers are either narrow (32 bits) or wide (48 bits).
509 // We encode narrow ones by setting the upper 16 bits in the first
510 // instruction.
511 NativeInstruction *insn = nativeInstruction_at(insn_addr);
512 assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
513 nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
514
515 MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
516
517 Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
518 Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
519 return 2 * NativeInstruction::instruction_size;
520 }
521
522 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
523 ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
524 if (at_return) {
525 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
526 // we may safely use the sp instead to perform the stack watermark check.
527 cmp(in_nmethod ? sp : rfp, tmp);
528 br(Assembler::HI, slow_path);
529 } else {
530 tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
531 }
532 }
533
534 void MacroAssembler::rt_call(address dest, Register tmp) {
535 CodeBlob *cb = CodeCache::find_blob(dest);
536 if (cb) {
537 far_call(RuntimeAddress(dest));
538 } else {
539 lea(tmp, RuntimeAddress(dest));
540 blr(tmp);
541 }
542 }
543
544 void MacroAssembler::push_cont_fastpath(Register java_thread) {
545 if (!Continuations::enabled()) return;
546 Label done;
547 ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
548 cmp(sp, rscratch1);
549 br(Assembler::LS, done);
550 mov(rscratch1, sp); // we can't use sp as the source in str
551 str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
552 bind(done);
553 }
554
555 void MacroAssembler::pop_cont_fastpath(Register java_thread) {
556 if (!Continuations::enabled()) return;
557 Label done;
558 ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
559 cmp(sp, rscratch1);
560 br(Assembler::LO, done);
561 str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
562 bind(done);
563 }
564
565 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
566 // we must set sp to zero to clear frame
567 str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
568
569 // must clear fp, so that compiled frames are not confused; it is
570 // possible that we need it only for debugging
571 if (clear_fp) {
572 str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
573 }
574
575 // Always clear the pc because it could have been set by make_walkable()
576 str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
577 }
578
579 // Calls to C land
580 //
581 // When entering C land, the rfp, & resp of the last Java frame have to be recorded
582 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
583 // has to be reset to 0. This is required to allow proper stack traversal.
584 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
585 Register last_java_fp,
586 Register last_java_pc,
587 Register scratch) {
588
589 if (last_java_pc->is_valid()) {
590 str(last_java_pc, Address(rthread,
591 JavaThread::frame_anchor_offset()
592 + JavaFrameAnchor::last_Java_pc_offset()));
593 }
594
595 // determine last_java_sp register
596 if (last_java_sp == sp) {
597 mov(scratch, sp);
598 last_java_sp = scratch;
599 } else if (!last_java_sp->is_valid()) {
600 last_java_sp = esp;
601 }
602
603 // last_java_fp is optional
604 if (last_java_fp->is_valid()) {
605 str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
606 }
607
608 // We must set sp last.
609 str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));
610 }
611
612 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
613 Register last_java_fp,
614 address last_java_pc,
615 Register scratch) {
616 assert(last_java_pc != nullptr, "must provide a valid PC");
617
618 adr(scratch, last_java_pc);
619 str(scratch, Address(rthread,
620 JavaThread::frame_anchor_offset()
621 + JavaFrameAnchor::last_Java_pc_offset()));
622
623 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
624 }
625
626 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
627 Register last_java_fp,
628 Label &L,
629 Register scratch) {
630 if (L.is_bound()) {
631 set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
632 } else {
633 InstructionMark im(this);
634 L.add_patch_at(code(), locator());
635 set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
636 }
637 }
638
639 static inline bool target_needs_far_branch(address addr) {
640 if (AOTCodeCache::is_on_for_dump()) {
641 return true;
642 }
643 // codecache size <= 128M
644 if (!MacroAssembler::far_branches()) {
645 return false;
646 }
647 // codecache size > 240M
648 if (MacroAssembler::codestub_branch_needs_far_jump()) {
649 return true;
650 }
651 // codecache size: 128M..240M
652 return !CodeCache::is_non_nmethod(addr);
653 }
654
655 void MacroAssembler::far_call(Address entry, Register tmp) {
656 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
657 assert(CodeCache::find_blob(entry.target()) != nullptr,
658 "destination of far call not found in code cache");
659 assert(entry.rspec().type() == relocInfo::external_word_type
660 || entry.rspec().type() == relocInfo::runtime_call_type
661 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
662 if (target_needs_far_branch(entry.target())) {
663 uint64_t offset;
664 // We can use ADRP here because we know that the total size of
665 // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
666 adrp(tmp, entry, offset);
667 add(tmp, tmp, offset);
668 blr(tmp);
669 } else {
670 bl(entry);
671 }
672 }
673
674 int MacroAssembler::far_jump(Address entry, Register tmp) {
675 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
676 assert(CodeCache::find_blob(entry.target()) != nullptr,
677 "destination of far call not found in code cache");
678 assert(entry.rspec().type() == relocInfo::external_word_type
679 || entry.rspec().type() == relocInfo::runtime_call_type
680 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
681 address start = pc();
682 if (target_needs_far_branch(entry.target())) {
683 uint64_t offset;
684 // We can use ADRP here because we know that the total size of
685 // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
686 adrp(tmp, entry, offset);
687 add(tmp, tmp, offset);
688 br(tmp);
689 } else {
690 b(entry);
691 }
692 return pc() - start;
693 }
694
695 void MacroAssembler::reserved_stack_check() {
696 // testing if reserved zone needs to be enabled
697 Label no_reserved_zone_enabling;
698
699 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
700 cmp(sp, rscratch1);
701 br(Assembler::LO, no_reserved_zone_enabling);
702
703 enter(); // LR and FP are live.
704 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
705 mov(c_rarg0, rthread);
706 blr(rscratch1);
707 leave();
708
709 // We have already removed our own frame.
710 // throw_delayed_StackOverflowError will think that it's been
711 // called by our caller.
712 lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
713 br(rscratch1);
714 should_not_reach_here();
715
716 bind(no_reserved_zone_enabling);
717 }
718
719 static void pass_arg0(MacroAssembler* masm, Register arg) {
720 if (c_rarg0 != arg ) {
721 masm->mov(c_rarg0, arg);
722 }
723 }
724
725 static void pass_arg1(MacroAssembler* masm, Register arg) {
726 if (c_rarg1 != arg ) {
727 masm->mov(c_rarg1, arg);
728 }
729 }
730
731 static void pass_arg2(MacroAssembler* masm, Register arg) {
732 if (c_rarg2 != arg ) {
733 masm->mov(c_rarg2, arg);
734 }
735 }
736
737 static void pass_arg3(MacroAssembler* masm, Register arg) {
738 if (c_rarg3 != arg ) {
739 masm->mov(c_rarg3, arg);
740 }
741 }
742
743 void MacroAssembler::call_VM_base(Register oop_result,
744 Register java_thread,
745 Register last_java_sp,
746 Label* return_pc,
747 address entry_point,
748 int number_of_arguments,
749 bool check_exceptions) {
750 // determine java_thread register
751 if (!java_thread->is_valid()) {
752 java_thread = rthread;
753 }
754
755 // determine last_java_sp register
756 if (!last_java_sp->is_valid()) {
757 last_java_sp = esp;
758 }
759
760 // debugging support
761 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
762 assert(java_thread == rthread, "unexpected register");
763 #ifdef ASSERT
764 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
765 // if (!TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
766 #endif // ASSERT
767
768 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
769 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
770
771 // push java thread (becomes first argument of C function)
772
773 mov(c_rarg0, java_thread);
774
775 // set last Java frame before call
776 assert(last_java_sp != rfp, "can't use rfp");
777
778 Label l;
779 set_last_Java_frame(last_java_sp, rfp, return_pc != nullptr ? *return_pc : l, rscratch1);
780
781 // do the call, remove parameters
782 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);
783
784 // lr could be poisoned with PAC signature during throw_pending_exception
785 // if it was tail-call optimized by compiler, since lr is not callee-saved
786 // reload it with proper value
787 adr(lr, l);
788
789 // reset last Java frame
790 // Only interpreter should have to clear fp
791 reset_last_Java_frame(true);
792
793 // C++ interp handles this in the interpreter
794 check_and_handle_popframe(java_thread);
795 check_and_handle_earlyret(java_thread);
796
797 if (check_exceptions) {
798 // check for pending exceptions (java_thread is set upon return)
799 ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
800 Label ok;
801 cbz(rscratch1, ok);
802 lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
803 br(rscratch1);
804 bind(ok);
805 }
806
807 // get oop result if there is one and reset the value in the thread
808 if (oop_result->is_valid()) {
809 get_vm_result_oop(oop_result, java_thread);
810 }
811 }
812
813 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
814 call_VM_base(oop_result, noreg, noreg, nullptr, entry_point, number_of_arguments, check_exceptions);
815 }
816
817 // Check the entry target is always reachable from any branch.
818 static bool is_always_within_branch_range(Address entry) {
819 if (AOTCodeCache::is_on_for_dump()) {
820 return false;
821 }
822 const address target = entry.target();
823
824 if (!CodeCache::contains(target)) {
825 // We always use trampolines for callees outside CodeCache.
826 assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
827 return false;
828 }
829
830 if (!MacroAssembler::far_branches()) {
831 return true;
832 }
833
834 if (entry.rspec().type() == relocInfo::runtime_call_type) {
835 // Runtime calls are calls of a non-compiled method (stubs, adapters).
836 // Non-compiled methods stay forever in CodeCache.
837 // We check whether the longest possible branch is within the branch range.
838 assert(CodeCache::find_blob(target) != nullptr &&
839 !CodeCache::find_blob(target)->is_nmethod(),
840 "runtime call of compiled method");
841 const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
842 const address left_longest_branch_start = CodeCache::low_bound();
843 const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
844 Assembler::reachable_from_branch_at(right_longest_branch_start, target);
845 return is_reachable;
846 }
847
848 return false;
849 }
850
851 // Maybe emit a call via a trampoline. If the code cache is small
852 // trampolines won't be emitted.
853 address MacroAssembler::trampoline_call(Address entry) {
854 assert(entry.rspec().type() == relocInfo::runtime_call_type
855 || entry.rspec().type() == relocInfo::opt_virtual_call_type
856 || entry.rspec().type() == relocInfo::static_call_type
857 || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");
858
859 address target = entry.target();
860
861 if (!is_always_within_branch_range(entry)) {
862 if (!in_scratch_emit_size()) {
863 // We don't want to emit a trampoline if C2 is generating dummy
864 // code during its branch shortening phase.
865 if (entry.rspec().type() == relocInfo::runtime_call_type) {
866 assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
867 code()->share_trampoline_for(entry.target(), offset());
868 } else {
869 address stub = emit_trampoline_stub(offset(), target);
870 if (stub == nullptr) {
871 postcond(pc() == badAddress);
872 return nullptr; // CodeCache is full
873 }
874 }
875 }
876 target = pc();
877 }
878
879 address call_pc = pc();
880 relocate(entry.rspec());
881 bl(target);
882
883 postcond(pc() != badAddress);
884 return call_pc;
885 }
886
887 // Emit a trampoline stub for a call to a target which is too far away.
888 //
889 // code sequences:
890 //
891 // call-site:
892 // branch-and-link to <destination> or <trampoline stub>
893 //
894 // Related trampoline stub for this call site in the stub section:
895 // load the call target from the constant pool
896 // branch (LR still points to the call site above)
897
898 address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
899 address dest) {
900 // Max stub size: alignment nop, TrampolineStub.
901 address stub = start_a_stub(max_trampoline_stub_size());
902 if (stub == nullptr) {
903 return nullptr; // CodeBuffer::expand failed
904 }
905
906 // Create a trampoline stub relocation which relates this trampoline stub
907 // with the call instruction at insts_call_instruction_offset in the
908 // instructions code-section.
909 align(wordSize);
910 relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
911 + insts_call_instruction_offset));
912 const int stub_start_offset = offset();
913
914 // Now, create the trampoline stub's code:
915 // - load the call
916 // - call
917 Label target;
918 ldr(rscratch1, target);
919 br(rscratch1);
920 bind(target);
921 assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
922 "should be");
923 emit_int64((int64_t)dest);
924
925 const address stub_start_addr = addr_at(stub_start_offset);
926
927 assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
928
929 end_a_stub();
930 return stub_start_addr;
931 }
932
933 int MacroAssembler::max_trampoline_stub_size() {
934 // Max stub size: alignment nop, TrampolineStub.
935 return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
936 }
937
938 void MacroAssembler::emit_static_call_stub() {
939 // CompiledDirectCall::set_to_interpreted knows the
940 // exact layout of this stub.
941
942 isb();
943 mov_metadata(rmethod, nullptr);
944
945 // Jump to the entry point of the c2i stub.
946 if (codestub_branch_needs_far_jump()) {
947 movptr(rscratch1, 0);
948 br(rscratch1);
949 } else {
950 b(pc());
951 }
952 }
953
954 int MacroAssembler::static_call_stub_size() {
955 // During AOT production run AOT and JIT compiled code
956 // are used at the same time. We need this size
957 // to be the same for both types of code.
958 if (!codestub_branch_needs_far_jump() && !AOTCodeCache::is_on_for_use()) {
959 // isb; movk; movz; movz; b
960 return 5 * NativeInstruction::instruction_size;
961 }
962 // isb; movk; movz; movz; movk; movz; movz; br
963 return 8 * NativeInstruction::instruction_size;
964 }
965
966 void MacroAssembler::c2bool(Register x) {
967 // implements x == 0 ? 0 : 1
968 // note: must only look at least-significant byte of x
969 // since C-style booleans are stored in one byte
970 // only! (was bug)
971 tst(x, 0xff);
972 cset(x, Assembler::NE);
973 }
974
975 address MacroAssembler::ic_call(address entry, jint method_index) {
976 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
977 movptr(rscratch2, (intptr_t)Universe::non_oop_word());
978 return trampoline_call(Address(entry, rh));
979 }
980
981 int MacroAssembler::ic_check_size() {
982 int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
983 if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
984 return NativeInstruction::instruction_size * (7 + extra_instructions);
985 } else {
986 return NativeInstruction::instruction_size * (5 + extra_instructions);
987 }
988 }
989
990 int MacroAssembler::ic_check(int end_alignment) {
991 Register receiver = j_rarg0;
992 Register data = rscratch2;
993 Register tmp1 = rscratch1;
994 Register tmp2 = r10;
995
996 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
997 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
998 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
999 // before the inline cache check here, and not after
1000 align(end_alignment, offset() + ic_check_size());
1001
1002 int uep_offset = offset();
1003
1004 if (UseCompactObjectHeaders) {
1005 load_narrow_klass_compact(tmp1, receiver);
1006 ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
1007 cmpw(tmp1, tmp2);
1008 } else {
1009 ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
1010 ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
1011 cmpw(tmp1, tmp2);
1012 }
1013
1014 Label dont;
1015 br(Assembler::EQ, dont);
1016 far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1017 bind(dont);
1018 assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
1019
1020 return uep_offset;
1021 }
1022
1023 // Implementation of call_VM versions
1024
1025 void MacroAssembler::call_VM(Register oop_result,
1026 address entry_point,
1027 bool check_exceptions) {
1028 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1029 }
1030
1031 void MacroAssembler::call_VM(Register oop_result,
1032 address entry_point,
1033 Register arg_1,
1034 bool check_exceptions) {
1035 pass_arg1(this, arg_1);
1036 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1037 }
1038
1039 void MacroAssembler::call_VM(Register oop_result,
1040 address entry_point,
1041 Register arg_1,
1042 Register arg_2,
1043 bool check_exceptions) {
1044 assert_different_registers(arg_1, c_rarg2);
1045 pass_arg2(this, arg_2);
1046 pass_arg1(this, arg_1);
1047 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1048 }
1049
1050 void MacroAssembler::call_VM(Register oop_result,
1051 address entry_point,
1052 Register arg_1,
1053 Register arg_2,
1054 Register arg_3,
1055 bool check_exceptions) {
1056 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1057 assert_different_registers(arg_2, c_rarg3);
1058 pass_arg3(this, arg_3);
1059
1060 pass_arg2(this, arg_2);
1061
1062 pass_arg1(this, arg_1);
1063 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1064 }
1065
1066 void MacroAssembler::call_VM(Register oop_result,
1067 Register last_java_sp,
1068 address entry_point,
1069 int number_of_arguments,
1070 bool check_exceptions) {
1071 call_VM_base(oop_result, rthread, last_java_sp, nullptr, entry_point, number_of_arguments, check_exceptions);
1072 }
1073
1074 void MacroAssembler::call_VM(Register oop_result,
1075 Register last_java_sp,
1076 address entry_point,
1077 Register arg_1,
1078 bool check_exceptions) {
1079 pass_arg1(this, arg_1);
1080 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1081 }
1082
1083 void MacroAssembler::call_VM(Register oop_result,
1084 Register last_java_sp,
1085 address entry_point,
1086 Register arg_1,
1087 Register arg_2,
1088 bool check_exceptions) {
1089
1090 assert_different_registers(arg_1, c_rarg2);
1091 pass_arg2(this, arg_2);
1092 pass_arg1(this, arg_1);
1093 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1094 }
1095
1096 void MacroAssembler::call_VM(Register oop_result,
1097 Register last_java_sp,
1098 address entry_point,
1099 Register arg_1,
1100 Register arg_2,
1101 Register arg_3,
1102 bool check_exceptions) {
1103 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1104 assert_different_registers(arg_2, c_rarg3);
1105 pass_arg3(this, arg_3);
1106 pass_arg2(this, arg_2);
1107 pass_arg1(this, arg_1);
1108 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1109 }
1110
1111
1112 void MacroAssembler::get_vm_result_oop(Register oop_result, Register java_thread) {
1113 ldr(oop_result, Address(java_thread, JavaThread::vm_result_oop_offset()));
1114 str(zr, Address(java_thread, JavaThread::vm_result_oop_offset()));
1115 verify_oop_msg(oop_result, "broken oop in call_VM_base");
1116 }
1117
1118 void MacroAssembler::get_vm_result_metadata(Register metadata_result, Register java_thread) {
1119 ldr(metadata_result, Address(java_thread, JavaThread::vm_result_metadata_offset()));
1120 str(zr, Address(java_thread, JavaThread::vm_result_metadata_offset()));
1121 }
1122
1123 void MacroAssembler::align(int modulus) {
1124 align(modulus, offset());
1125 }
1126
1127 // Ensure that the code at target bytes offset from the current offset() is aligned
1128 // according to modulus.
1129 void MacroAssembler::align(int modulus, int target) {
1130 int delta = target - offset();
1131 while ((offset() + delta) % modulus != 0) nop();
1132 }
1133
1134 void MacroAssembler::post_call_nop() {
1135 if (!Continuations::enabled()) {
1136 return;
1137 }
1138 InstructionMark im(this);
1139 relocate(post_call_nop_Relocation::spec());
1140 InlineSkippedInstructionsCounter skipCounter(this);
1141 nop();
1142 movk(zr, 0);
1143 movk(zr, 0);
1144 }
1145
1146 // these are no-ops overridden by InterpreterMacroAssembler
1147
1148 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
1149
1150 void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
1151
1152 // Look up the method for a megamorphic invokeinterface call.
1153 // The target method is determined by <intf_klass, itable_index>.
1154 // The receiver klass is in recv_klass.
1155 // On success, the result will be in method_result, and execution falls through.
1156 // On failure, execution transfers to the given label.
1157 void MacroAssembler::lookup_interface_method(Register recv_klass,
1158 Register intf_klass,
1159 RegisterOrConstant itable_index,
1160 Register method_result,
1161 Register scan_temp,
1162 Label& L_no_such_interface,
1163 bool return_method) {
1164 assert_different_registers(recv_klass, intf_klass, scan_temp);
1165 assert_different_registers(method_result, intf_klass, scan_temp);
1166 assert(recv_klass != method_result || !return_method,
1167 "recv_klass can be destroyed when method isn't needed");
1168 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
1169 "caller must use same register for non-constant itable index as for method");
1170
1171 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
1172 int vtable_base = in_bytes(Klass::vtable_start_offset());
1173 int itentry_off = in_bytes(itableMethodEntry::method_offset());
1174 int scan_step = itableOffsetEntry::size() * wordSize;
1175 int vte_size = vtableEntry::size_in_bytes();
1176 assert(vte_size == wordSize, "else adjust times_vte_scale");
1177
1178 ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
1179
1180 // Could store the aligned, prescaled offset in the klass.
1181 // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
1182 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
1183 add(scan_temp, scan_temp, vtable_base);
1184
1185 if (return_method) {
1186 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
1187 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
1188 // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
1189 lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
1190 if (itentry_off)
1191 add(recv_klass, recv_klass, itentry_off);
1192 }
1193
1194 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
1195 // if (scan->interface() == intf) {
1196 // result = (klass + scan->offset() + itable_index);
1197 // }
1198 // }
1199 Label search, found_method;
1200
1201 ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
1202 cmp(intf_klass, method_result);
1203 br(Assembler::EQ, found_method);
1204 bind(search);
1205 // Check that the previous entry is non-null. A null entry means that
1206 // the receiver class doesn't implement the interface, and wasn't the
1207 // same as when the caller was compiled.
1208 cbz(method_result, L_no_such_interface);
1209 if (itableOffsetEntry::interface_offset() != 0) {
1210 add(scan_temp, scan_temp, scan_step);
1211 ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
1212 } else {
1213 ldr(method_result, Address(pre(scan_temp, scan_step)));
1214 }
1215 cmp(intf_klass, method_result);
1216 br(Assembler::NE, search);
1217
1218 bind(found_method);
1219
1220 // Got a hit.
1221 if (return_method) {
1222 ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
1223 ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
1224 }
1225 }
1226
1227 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
1228 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
1229 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
1230 // The target method is determined by <holder_klass, itable_index>.
1231 // The receiver klass is in recv_klass.
1232 // On success, the result will be in method_result, and execution falls through.
1233 // On failure, execution transfers to the given label.
1234 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
1235 Register holder_klass,
1236 Register resolved_klass,
1237 Register method_result,
1238 Register temp_itbl_klass,
1239 Register scan_temp,
1240 int itable_index,
1241 Label& L_no_such_interface) {
1242 // 'method_result' is only used as output register at the very end of this method.
1243 // Until then we can reuse it as 'holder_offset'.
1244 Register holder_offset = method_result;
1245 assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);
1246
1247 int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
1248 int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
1249 int ioffset = in_bytes(itableOffsetEntry::interface_offset());
1250 int ooffset = in_bytes(itableOffsetEntry::offset_offset());
1251
1252 Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;
1253
1254 ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
1255 add(recv_klass, recv_klass, vtable_start_offset + ioffset);
1256 // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
1257 // temp_itbl_klass = itable[0]._interface;
1258 int vtblEntrySize = vtableEntry::size_in_bytes();
1259 assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
1260 ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
1261 mov(holder_offset, zr);
1262 // scan_temp = &(itable[0]._interface)
1263 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
1264
1265 // Initial checks:
1266 // - if (holder_klass != resolved_klass), go to "scan for resolved"
1267 // - if (itable[0] == holder_klass), shortcut to "holder found"
1268 // - if (itable[0] == 0), no such interface
1269 cmp(resolved_klass, holder_klass);
1270 br(Assembler::NE, L_loop_search_resolved_entry);
1271 cmp(holder_klass, temp_itbl_klass);
1272 br(Assembler::EQ, L_holder_found);
1273 cbz(temp_itbl_klass, L_no_such_interface);
1274
1275 // Loop: Look for holder_klass record in itable
1276 // do {
1277 // temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
1278 // if (temp_itbl_klass == holder_klass) {
1279 // goto L_holder_found; // Found!
1280 // }
1281 // } while (temp_itbl_klass != 0);
1282 // goto L_no_such_interface // Not found.
1283 Label L_search_holder;
1284 bind(L_search_holder);
1285 ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
1286 cmp(holder_klass, temp_itbl_klass);
1287 br(Assembler::EQ, L_holder_found);
1288 cbnz(temp_itbl_klass, L_search_holder);
1289
1290 b(L_no_such_interface);
1291
1292 // Loop: Look for resolved_class record in itable
1293 // while (true) {
1294 // temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
1295 // if (temp_itbl_klass == 0) {
1296 // goto L_no_such_interface;
1297 // }
1298 // if (temp_itbl_klass == resolved_klass) {
1299 // goto L_resolved_found; // Found!
1300 // }
1301 // if (temp_itbl_klass == holder_klass) {
1302 // holder_offset = scan_temp;
1303 // }
1304 // }
1305 //
1306 Label L_loop_search_resolved;
1307 bind(L_loop_search_resolved);
1308 ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
1309 bind(L_loop_search_resolved_entry);
1310 cbz(temp_itbl_klass, L_no_such_interface);
1311 cmp(resolved_klass, temp_itbl_klass);
1312 br(Assembler::EQ, L_resolved_found);
1313 cmp(holder_klass, temp_itbl_klass);
1314 br(Assembler::NE, L_loop_search_resolved);
1315 mov(holder_offset, scan_temp);
1316 b(L_loop_search_resolved);
1317
1318 // See if we already have a holder klass. If not, go and scan for it.
1319 bind(L_resolved_found);
1320 cbz(holder_offset, L_search_holder);
1321 mov(scan_temp, holder_offset);
1322
1323 // Finally, scan_temp contains holder_klass vtable offset
1324 bind(L_holder_found);
1325 ldrw(method_result, Address(scan_temp, ooffset - ioffset));
1326 add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
1327 - vtable_start_offset - ioffset); // substract offsets to restore the original value of recv_klass
1328 ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
1329 }
1330
1331 // virtual method calling
1332 void MacroAssembler::lookup_virtual_method(Register recv_klass,
1333 RegisterOrConstant vtable_index,
1334 Register method_result) {
1335 assert(vtableEntry::size() * wordSize == 8,
1336 "adjust the scaling in the code below");
1337 int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());
1338
1339 if (vtable_index.is_register()) {
1340 lea(method_result, Address(recv_klass,
1341 vtable_index.as_register(),
1342 Address::lsl(LogBytesPerWord)));
1343 ldr(method_result, Address(method_result, vtable_offset_in_bytes));
1344 } else {
1345 vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
1346 ldr(method_result,
1347 form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
1348 }
1349 }
1350
1351 void MacroAssembler::check_klass_subtype(Register sub_klass,
1352 Register super_klass,
1353 Register temp_reg,
1354 Label& L_success) {
1355 Label L_failure;
1356 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
1357 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
1358 bind(L_failure);
1359 }
1360
1361
1362 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
1363 Register super_klass,
1364 Register temp_reg,
1365 Label* L_success,
1366 Label* L_failure,
1367 Label* L_slow_path,
1368 Register super_check_offset) {
1369 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
1370 bool must_load_sco = ! super_check_offset->is_valid();
1371 if (must_load_sco) {
1372 assert(temp_reg != noreg, "supply either a temp or a register offset");
1373 }
1374
1375 Label L_fallthrough;
1376 int label_nulls = 0;
1377 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1378 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1379 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
1380 assert(label_nulls <= 1, "at most one null in the batch");
1381
1382 int sco_offset = in_bytes(Klass::super_check_offset_offset());
1383 Address super_check_offset_addr(super_klass, sco_offset);
1384
1385 // Hacked jmp, which may only be used just before L_fallthrough.
1386 #define final_jmp(label) \
1387 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
1388 else b(label) /*omit semi*/
1389
1390 // If the pointers are equal, we are done (e.g., String[] elements).
1391 // This self-check enables sharing of secondary supertype arrays among
1392 // non-primary types such as array-of-interface. Otherwise, each such
1393 // type would need its own customized SSA.
1394 // We move this check to the front of the fast path because many
1395 // type checks are in fact trivially successful in this manner,
1396 // so we get a nicely predicted branch right at the start of the check.
1397 cmp(sub_klass, super_klass);
1398 br(Assembler::EQ, *L_success);
1399
1400 // Check the supertype display:
1401 if (must_load_sco) {
1402 ldrw(temp_reg, super_check_offset_addr);
1403 super_check_offset = temp_reg;
1404 }
1405
1406 Address super_check_addr(sub_klass, super_check_offset);
1407 ldr(rscratch1, super_check_addr);
1408 cmp(super_klass, rscratch1); // load displayed supertype
1409 br(Assembler::EQ, *L_success);
1410
1411 // This check has worked decisively for primary supers.
1412 // Secondary supers are sought in the super_cache ('super_cache_addr').
1413 // (Secondary supers are interfaces and very deeply nested subtypes.)
1414 // This works in the same check above because of a tricky aliasing
1415 // between the super_cache and the primary super display elements.
1416 // (The 'super_check_addr' can address either, as the case requires.)
1417 // Note that the cache is updated below if it does not help us find
1418 // what we need immediately.
1419 // So if it was a primary super, we can just fail immediately.
1420 // Otherwise, it's the slow path for us (no success at this point).
1421
1422 sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
1423 if (L_failure == &L_fallthrough) {
1424 cbz(rscratch1, *L_slow_path);
1425 } else {
1426 cbnz(rscratch1, *L_failure);
1427 final_jmp(*L_slow_path);
1428 }
1429
1430 bind(L_fallthrough);
1431
1432 #undef final_jmp
1433 }
1434
1435 // These two are taken from x86, but they look generally useful
1436
1437 // scans count pointer sized words at [addr] for occurrence of value,
1438 // generic
1439 void MacroAssembler::repne_scan(Register addr, Register value, Register count,
1440 Register scratch) {
1441 Label Lloop, Lexit;
1442 cbz(count, Lexit);
1443 bind(Lloop);
1444 ldr(scratch, post(addr, wordSize));
1445 cmp(value, scratch);
1446 br(EQ, Lexit);
1447 sub(count, count, 1);
1448 cbnz(count, Lloop);
1449 bind(Lexit);
1450 }
1451
1452 // scans count 4 byte words at [addr] for occurrence of value,
1453 // generic
1454 void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
1455 Register scratch) {
1456 Label Lloop, Lexit;
1457 cbz(count, Lexit);
1458 bind(Lloop);
1459 ldrw(scratch, post(addr, wordSize));
1460 cmpw(value, scratch);
1461 br(EQ, Lexit);
1462 sub(count, count, 1);
1463 cbnz(count, Lloop);
1464 bind(Lexit);
1465 }
1466
1467 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
1468 Register super_klass,
1469 Register temp_reg,
1470 Register temp2_reg,
1471 Label* L_success,
1472 Label* L_failure,
1473 bool set_cond_codes) {
1474 // NB! Callers may assume that, when temp2_reg is a valid register,
1475 // this code sets it to a nonzero value.
1476
1477 assert_different_registers(sub_klass, super_klass, temp_reg);
1478 if (temp2_reg != noreg)
1479 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1480 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1481
1482 Label L_fallthrough;
1483 int label_nulls = 0;
1484 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1485 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1486 assert(label_nulls <= 1, "at most one null in the batch");
1487
1488 // a couple of useful fields in sub_klass:
1489 int ss_offset = in_bytes(Klass::secondary_supers_offset());
1490 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1491 Address secondary_supers_addr(sub_klass, ss_offset);
1492 Address super_cache_addr( sub_klass, sc_offset);
1493
1494 BLOCK_COMMENT("check_klass_subtype_slow_path");
1495
1496 // Do a linear scan of the secondary super-klass chain.
1497 // This code is rarely used, so simplicity is a virtue here.
1498 // The repne_scan instruction uses fixed registers, which we must spill.
1499 // Don't worry too much about pre-existing connections with the input regs.
1500
1501 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1502 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1503
1504 RegSet pushed_registers;
1505 if (!IS_A_TEMP(r2)) pushed_registers += r2;
1506 if (!IS_A_TEMP(r5)) pushed_registers += r5;
1507
1508 if (super_klass != r0) {
1509 if (!IS_A_TEMP(r0)) pushed_registers += r0;
1510 }
1511
1512 push(pushed_registers, sp);
1513
1514 // Get super_klass value into r0 (even if it was in r5 or r2).
1515 if (super_klass != r0) {
1516 mov(r0, super_klass);
1517 }
1518
1519 #ifndef PRODUCT
1520 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1521 #endif //PRODUCT
1522
1523 // We will consult the secondary-super array.
1524 ldr(r5, secondary_supers_addr);
1525 // Load the array length.
1526 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1527 // Skip to start of data.
1528 add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1529
1530 cmp(sp, zr); // Clear Z flag; SP is never zero
1531 // Scan R2 words at [R5] for an occurrence of R0.
1532 // Set NZ/Z based on last compare.
1533 repne_scan(r5, r0, r2, rscratch1);
1534
1535 // Unspill the temp. registers:
1536 pop(pushed_registers, sp);
1537
1538 br(Assembler::NE, *L_failure);
1539
1540 // Success. Cache the super we found and proceed in triumph.
1541
1542 if (UseSecondarySupersCache) {
1543 str(super_klass, super_cache_addr);
1544 }
1545
1546 if (L_success != &L_fallthrough) {
1547 b(*L_success);
1548 }
1549
1550 #undef IS_A_TEMP
1551
1552 bind(L_fallthrough);
1553 }
1554
1555 // If Register r is invalid, remove a new register from
1556 // available_regs, and add new register to regs_to_push.
1557 Register MacroAssembler::allocate_if_noreg(Register r,
1558 RegSetIterator<Register> &available_regs,
1559 RegSet ®s_to_push) {
1560 if (!r->is_valid()) {
1561 r = *available_regs++;
1562 regs_to_push += r;
1563 }
1564 return r;
1565 }
1566
1567 // check_klass_subtype_slow_path_table() looks for super_klass in the
1568 // hash table belonging to super_klass, branching to L_success or
1569 // L_failure as appropriate. This is essentially a shim which
1570 // allocates registers as necessary then calls
1571 // lookup_secondary_supers_table() to do the work. Any of the temp
1572 // regs may be noreg, in which case this logic will chooses some
1573 // registers push and pop them from the stack.
1574 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1575 Register super_klass,
1576 Register temp_reg,
1577 Register temp2_reg,
1578 Register temp3_reg,
1579 Register result_reg,
1580 FloatRegister vtemp,
1581 Label* L_success,
1582 Label* L_failure,
1583 bool set_cond_codes) {
1584 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1585
1586 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1587
1588 Label L_fallthrough;
1589 int label_nulls = 0;
1590 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1591 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1592 assert(label_nulls <= 1, "at most one null in the batch");
1593
1594 BLOCK_COMMENT("check_klass_subtype_slow_path");
1595
1596 RegSetIterator<Register> available_regs
1597 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1598
1599 RegSet pushed_regs;
1600
1601 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1602 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1603 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1604 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1605
1606 push(pushed_regs, sp);
1607
1608 lookup_secondary_supers_table_var(sub_klass,
1609 super_klass,
1610 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1611 nullptr);
1612 cmp(result_reg, zr);
1613
1614 // Unspill the temp. registers:
1615 pop(pushed_regs, sp);
1616
1617 // NB! Callers may assume that, when set_cond_codes is true, this
1618 // code sets temp2_reg to a nonzero value.
1619 if (set_cond_codes) {
1620 mov(temp2_reg, 1);
1621 }
1622
1623 br(Assembler::NE, *L_failure);
1624
1625 if (L_success != &L_fallthrough) {
1626 b(*L_success);
1627 }
1628
1629 bind(L_fallthrough);
1630 }
1631
1632 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1633 Register super_klass,
1634 Register temp_reg,
1635 Register temp2_reg,
1636 Label* L_success,
1637 Label* L_failure,
1638 bool set_cond_codes) {
1639 if (UseSecondarySupersTable) {
1640 check_klass_subtype_slow_path_table
1641 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1642 /*vtemp*/fnoreg,
1643 L_success, L_failure, set_cond_codes);
1644 } else {
1645 check_klass_subtype_slow_path_linear
1646 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1647 }
1648 }
1649
1650
1651 // Ensure that the inline code and the stub are using the same registers.
1652 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
1653 do { \
1654 assert(r_super_klass == r0 && \
1655 r_array_base == r1 && \
1656 r_array_length == r2 && \
1657 (r_array_index == r3 || r_array_index == noreg) && \
1658 (r_sub_klass == r4 || r_sub_klass == noreg) && \
1659 (r_bitmap == rscratch2 || r_bitmap == noreg) && \
1660 (result == r5 || result == noreg), "registers must match aarch64.ad"); \
1661 } while(0)
1662
1663 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
1664 Register r_super_klass,
1665 Register temp1,
1666 Register temp2,
1667 Register temp3,
1668 FloatRegister vtemp,
1669 Register result,
1670 u1 super_klass_slot,
1671 bool stub_is_near) {
1672 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1673
1674 Label L_fallthrough;
1675
1676 BLOCK_COMMENT("lookup_secondary_supers_table {");
1677
1678 const Register
1679 r_array_base = temp1, // r1
1680 r_array_length = temp2, // r2
1681 r_array_index = temp3, // r3
1682 r_bitmap = rscratch2;
1683
1684 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1685
1686 u1 bit = super_klass_slot;
1687
1688 // Make sure that result is nonzero if the TBZ below misses.
1689 mov(result, 1);
1690
1691 // We're going to need the bitmap in a vector reg and in a core reg,
1692 // so load both now.
1693 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1694 if (bit != 0) {
1695 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1696 }
1697 // First check the bitmap to see if super_klass might be present. If
1698 // the bit is zero, we are certain that super_klass is not one of
1699 // the secondary supers.
1700 tbz(r_bitmap, bit, L_fallthrough);
1701
1702 // Get the first array index that can contain super_klass into r_array_index.
1703 if (bit != 0) {
1704 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit);
1705 cnt(vtemp, T8B, vtemp);
1706 addv(vtemp, T8B, vtemp);
1707 fmovd(r_array_index, vtemp);
1708 } else {
1709 mov(r_array_index, (u1)1);
1710 }
1711 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1712
1713 // We will consult the secondary-super array.
1714 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1715
1716 // The value i in r_array_index is >= 1, so even though r_array_base
1717 // points to the length, we don't need to adjust it to point to the
1718 // data.
1719 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1720 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1721
1722 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1723 eor(result, result, r_super_klass);
1724 cbz(result, L_fallthrough); // Found a match
1725
1726 // Is there another entry to check? Consult the bitmap.
1727 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough);
1728
1729 // Linear probe.
1730 if (bit != 0) {
1731 ror(r_bitmap, r_bitmap, bit);
1732 }
1733
1734 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1735 // The next slot to be inspected, by the stub we're about to call,
1736 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1737 // have been checked.
1738 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1739 if (stub_is_near) {
1740 bl(stub);
1741 } else {
1742 address call = trampoline_call(stub);
1743 if (call == nullptr) {
1744 return false; // trampoline allocation failed
1745 }
1746 }
1747
1748 BLOCK_COMMENT("} lookup_secondary_supers_table");
1749
1750 bind(L_fallthrough);
1751
1752 if (VerifySecondarySupers) {
1753 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1754 temp1, temp2, result); // r1, r2, r5
1755 }
1756 return true;
1757 }
1758
1759 // At runtime, return 0 in result if r_super_klass is a superclass of
1760 // r_sub_klass, otherwise return nonzero. Use this version of
1761 // lookup_secondary_supers_table() if you don't know ahead of time
1762 // which superclass will be searched for. Used by interpreter and
1763 // runtime stubs. It is larger and has somewhat greater latency than
1764 // the version above, which takes a constant super_klass_slot.
1765 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
1766 Register r_super_klass,
1767 Register temp1,
1768 Register temp2,
1769 Register temp3,
1770 FloatRegister vtemp,
1771 Register result,
1772 Label *L_success) {
1773 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1774
1775 Label L_fallthrough;
1776
1777 BLOCK_COMMENT("lookup_secondary_supers_table {");
1778
1779 const Register
1780 r_array_index = temp3,
1781 slot = rscratch1,
1782 r_bitmap = rscratch2;
1783
1784 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
1785
1786 // Make sure that result is nonzero if the test below misses.
1787 mov(result, 1);
1788
1789 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1790
1791 // First check the bitmap to see if super_klass might be present. If
1792 // the bit is zero, we are certain that super_klass is not one of
1793 // the secondary supers.
1794
1795 // This next instruction is equivalent to:
1796 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1797 // sub(temp2, tmp_reg, slot);
1798 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1799 lslv(temp2, r_bitmap, temp2);
1800 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough);
1801
1802 bool must_save_v0 = (vtemp == fnoreg);
1803 if (must_save_v0) {
1804 // temp1 and result are free, so use them to preserve vtemp
1805 vtemp = v0;
1806 mov(temp1, vtemp, D, 0);
1807 mov(result, vtemp, D, 1);
1808 }
1809
1810 // Get the first array index that can contain super_klass into r_array_index.
1811 mov(vtemp, D, 0, temp2);
1812 cnt(vtemp, T8B, vtemp);
1813 addv(vtemp, T8B, vtemp);
1814 mov(r_array_index, vtemp, D, 0);
1815
1816 if (must_save_v0) {
1817 mov(vtemp, D, 0, temp1 );
1818 mov(vtemp, D, 1, result);
1819 }
1820
1821 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1822
1823 const Register
1824 r_array_base = temp1,
1825 r_array_length = temp2;
1826
1827 // The value i in r_array_index is >= 1, so even though r_array_base
1828 // points to the length, we don't need to adjust it to point to the
1829 // data.
1830 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1831 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1832
1833 // We will consult the secondary-super array.
1834 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1835
1836 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1837 eor(result, result, r_super_klass);
1838 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match
1839
1840 // Is there another entry to check? Consult the bitmap.
1841 rorv(r_bitmap, r_bitmap, slot);
1842 // rol(r_bitmap, r_bitmap, 1);
1843 tbz(r_bitmap, 1, L_fallthrough);
1844
1845 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1846 // The next slot to be inspected, by the logic we're about to call,
1847 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1848 // have been checked.
1849 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index,
1850 r_bitmap, r_array_length, result, /*is_stub*/false);
1851
1852 BLOCK_COMMENT("} lookup_secondary_supers_table");
1853
1854 bind(L_fallthrough);
1855
1856 if (VerifySecondarySupers) {
1857 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1858 temp1, temp2, result); // r1, r2, r5
1859 }
1860
1861 if (L_success) {
1862 cbz(result, *L_success);
1863 }
1864 }
1865
1866 // Called by code generated by check_klass_subtype_slow_path
1867 // above. This is called when there is a collision in the hashed
1868 // lookup in the secondary supers array.
1869 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1870 Register r_array_base,
1871 Register r_array_index,
1872 Register r_bitmap,
1873 Register temp1,
1874 Register result,
1875 bool is_stub) {
1876 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1877
1878 const Register
1879 r_array_length = temp1,
1880 r_sub_klass = noreg; // unused
1881
1882 if (is_stub) {
1883 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1884 }
1885
1886 Label L_fallthrough, L_huge;
1887
1888 // Load the array length.
1889 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1890 // And adjust the array base to point to the data.
1891 // NB! Effectively increments current slot index by 1.
1892 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1893 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1894
1895 // The bitmap is full to bursting.
1896 // Implicit invariant: BITMAP_FULL implies (length > 0)
1897 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1898 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2));
1899 br(GT, L_huge);
1900
1901 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1902 // current slot (at secondary_supers[r_array_index]) has not yet
1903 // been inspected, and r_array_index may be out of bounds if we
1904 // wrapped around the end of the array.
1905
1906 { // This is conventional linear probing, but instead of terminating
1907 // when a null entry is found in the table, we maintain a bitmap
1908 // in which a 0 indicates missing entries.
1909 // As long as the bitmap is not completely full,
1910 // array_length == popcount(bitmap). The array_length check above
1911 // guarantees there are 0s in the bitmap, so the loop eventually
1912 // terminates.
1913 Label L_loop;
1914 bind(L_loop);
1915
1916 // Check for wraparound.
1917 cmp(r_array_index, r_array_length);
1918 csel(r_array_index, zr, r_array_index, GE);
1919
1920 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1921 eor(result, rscratch1, r_super_klass);
1922 cbz(result, L_fallthrough);
1923
1924 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1925
1926 ror(r_bitmap, r_bitmap, 1);
1927 add(r_array_index, r_array_index, 1);
1928 b(L_loop);
1929 }
1930
1931 { // Degenerate case: more than 64 secondary supers.
1932 // FIXME: We could do something smarter here, maybe a vectorized
1933 // comparison or a binary search, but is that worth any added
1934 // complexity?
1935 bind(L_huge);
1936 cmp(sp, zr); // Clear Z flag; SP is never zero
1937 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1938 cset(result, NE); // result == 0 iff we got a match.
1939 }
1940
1941 bind(L_fallthrough);
1942 }
1943
1944 // Make sure that the hashed lookup and a linear scan agree.
1945 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1946 Register r_super_klass,
1947 Register temp1,
1948 Register temp2,
1949 Register result) {
1950 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1951
1952 const Register
1953 r_array_base = temp1,
1954 r_array_length = temp2;
1955
1956 BLOCK_COMMENT("verify_secondary_supers_table {");
1957
1958 // We will consult the secondary-super array.
1959 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1960
1961 // Load the array length.
1962 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1963 // And adjust the array base to point to the data.
1964 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1965
1966 cmp(sp, zr); // Clear Z flag; SP is never zero
1967 // Scan R2 words at [R5] for an occurrence of R0.
1968 // Set NZ/Z based on last compare.
1969 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
1970 // rscratch1 == 0 iff we got a match.
1971 cset(rscratch1, NE);
1972
1973 Label passed;
1974 cmp(result, zr);
1975 cset(result, NE); // normalize result to 0/1 for comparison
1976
1977 cmp(rscratch1, result);
1978 br(EQ, passed);
1979 {
1980 mov(r0, r_super_klass); // r0 <- r0
1981 mov(r1, r_sub_klass); // r1 <- r4
1982 mov(r2, /*expected*/rscratch1); // r2 <- r8
1983 mov(r3, result); // r3 <- r5
1984 mov(r4, (address)("mismatch")); // r4 <- const
1985 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
1986 should_not_reach_here();
1987 }
1988 bind(passed);
1989
1990 BLOCK_COMMENT("} verify_secondary_supers_table");
1991 }
1992
1993 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
1994 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
1995 assert_different_registers(klass, rthread, scratch);
1996
1997 Label L_fallthrough, L_tmp;
1998 if (L_fast_path == nullptr) {
1999 L_fast_path = &L_fallthrough;
2000 } else if (L_slow_path == nullptr) {
2001 L_slow_path = &L_fallthrough;
2002 }
2003 // Fast path check: class is fully initialized
2004 lea(scratch, Address(klass, InstanceKlass::init_state_offset()));
2005 ldarb(scratch, scratch);
2006 cmp(scratch, InstanceKlass::fully_initialized);
2007 br(Assembler::EQ, *L_fast_path);
2008
2009 // Fast path check: current thread is initializer thread
2010 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
2011 cmp(rthread, scratch);
2012
2013 if (L_slow_path == &L_fallthrough) {
2014 br(Assembler::EQ, *L_fast_path);
2015 bind(*L_slow_path);
2016 } else if (L_fast_path == &L_fallthrough) {
2017 br(Assembler::NE, *L_slow_path);
2018 bind(*L_fast_path);
2019 } else {
2020 Unimplemented();
2021 }
2022 }
2023
2024 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
2025 if (!VerifyOops) return;
2026
2027 // Pass register number to verify_oop_subroutine
2028 const char* b = nullptr;
2029 {
2030 ResourceMark rm;
2031 stringStream ss;
2032 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
2033 b = code_string(ss.as_string());
2034 }
2035 BLOCK_COMMENT("verify_oop {");
2036
2037 strip_return_address(); // This might happen within a stack frame.
2038 protect_return_address();
2039 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2040 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2041
2042 mov(r0, reg);
2043 movptr(rscratch1, (uintptr_t)(address)b);
2044
2045 // call indirectly to solve generation ordering problem
2046 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2047 ldr(rscratch2, Address(rscratch2));
2048 blr(rscratch2);
2049
2050 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2051 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2052 authenticate_return_address();
2053
2054 BLOCK_COMMENT("} verify_oop");
2055 }
2056
2057 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
2058 if (!VerifyOops) return;
2059
2060 const char* b = nullptr;
2061 {
2062 ResourceMark rm;
2063 stringStream ss;
2064 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
2065 b = code_string(ss.as_string());
2066 }
2067 BLOCK_COMMENT("verify_oop_addr {");
2068
2069 strip_return_address(); // This might happen within a stack frame.
2070 protect_return_address();
2071 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2072 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2073
2074 // addr may contain sp so we will have to adjust it based on the
2075 // pushes that we just did.
2076 if (addr.uses(sp)) {
2077 lea(r0, addr);
2078 ldr(r0, Address(r0, 4 * wordSize));
2079 } else {
2080 ldr(r0, addr);
2081 }
2082 movptr(rscratch1, (uintptr_t)(address)b);
2083
2084 // call indirectly to solve generation ordering problem
2085 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2086 ldr(rscratch2, Address(rscratch2));
2087 blr(rscratch2);
2088
2089 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2090 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2091 authenticate_return_address();
2092
2093 BLOCK_COMMENT("} verify_oop_addr");
2094 }
2095
2096 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
2097 int extra_slot_offset) {
2098 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
2099 int stackElementSize = Interpreter::stackElementSize;
2100 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
2101 #ifdef ASSERT
2102 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
2103 assert(offset1 - offset == stackElementSize, "correct arithmetic");
2104 #endif
2105 if (arg_slot.is_constant()) {
2106 return Address(esp, arg_slot.as_constant() * stackElementSize
2107 + offset);
2108 } else {
2109 add(rscratch1, esp, arg_slot.as_register(),
2110 ext::uxtx, exact_log2(stackElementSize));
2111 return Address(rscratch1, offset);
2112 }
2113 }
2114
2115 // Handle the receiver type profile update given the "recv" klass.
2116 //
2117 // Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
2118 // If there are no matching or claimable receiver entries in RD, updates
2119 // the polymorphic counter.
2120 //
2121 // This code expected to run by either the interpreter or JIT-ed code, without
2122 // extra synchronization. For safety, receiver cells are claimed atomically, which
2123 // avoids grossly misrepresenting the profiles under concurrent updates. For speed,
2124 // counter updates are not atomic.
2125 //
2126 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
2127 assert_different_registers(recv, mdp, rscratch1, rscratch2);
2128
2129 int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
2130 int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
2131 int poly_count_offset = in_bytes(CounterData::count_offset());
2132 int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
2133 int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
2134
2135 // Adjust for MDP offsets.
2136 base_receiver_offset += mdp_offset;
2137 end_receiver_offset += mdp_offset;
2138 poly_count_offset += mdp_offset;
2139
2140 #ifdef ASSERT
2141 // We are about to walk the MDO slots without asking for offsets.
2142 // Check that our math hits all the right spots.
2143 for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
2144 int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
2145 int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
2146 int offset = base_receiver_offset + receiver_step*c;
2147 int count_offset = offset + receiver_to_count_step;
2148 assert(offset == real_recv_offset, "receiver slot math");
2149 assert(count_offset == real_count_offset, "receiver count math");
2150 }
2151 int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
2152 assert(poly_count_offset == real_poly_count_offset, "poly counter math");
2153 #endif
2154
2155 // Corner case: no profile table. Increment poly counter and exit.
2156 if (ReceiverTypeData::row_limit() == 0) {
2157 increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
2158 return;
2159 }
2160
2161 Register offset = rscratch2;
2162
2163 Label L_loop_search_receiver, L_loop_search_empty;
2164 Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
2165
2166 // The code here recognizes three major cases:
2167 // A. Fastest: receiver found in the table
2168 // B. Fast: no receiver in the table, and the table is full
2169 // C. Slow: no receiver in the table, free slots in the table
2170 //
2171 // The case A performance is most important, as perfectly-behaved code would end up
2172 // there, especially with larger TypeProfileWidth. The case B performance is
2173 // important as well, this is where bulk of code would land for normally megamorphic
2174 // cases. The case C performance is not essential, its job is to deal with installation
2175 // races, we optimize for code density instead. Case C needs to make sure that receiver
2176 // rows are only claimed once. This makes sure we never overwrite a row for another
2177 // receiver and never duplicate the receivers in the list, making profile type-accurate.
2178 //
2179 // It is very tempting to handle these cases in a single loop, and claim the first slot
2180 // without checking the rest of the table. But, profiling code should tolerate free slots
2181 // in the table, as class unloading can clear them. After such cleanup, the receiver
2182 // we need might be _after_ the free slot. Therefore, we need to let at least full scan
2183 // to complete, before trying to install new slots. Splitting the code in several tight
2184 // loops also helpfully optimizes for cases A and B.
2185 //
2186 // This code is effectively:
2187 //
2188 // restart:
2189 // // Fastest: receiver is already installed
2190 // for (i = 0; i < receiver_count(); i++) {
2191 // if (receiver(i) == recv) goto found_recv(i);
2192 // }
2193 //
2194 // // Fast: no receiver, but profile is full
2195 // for (i = 0; i < receiver_count(); i++) {
2196 // if (receiver(i) == null) goto found_null(i);
2197 // }
2198 // goto polymorphic
2199 //
2200 // // Slow: try to install receiver
2201 // found_null(i):
2202 // CAS(&receiver(i), null, recv);
2203 // goto restart
2204 //
2205 // polymorphic:
2206 // count++;
2207 // return
2208 //
2209 // found_recv(i):
2210 // *receiver_count(i)++
2211 //
2212
2213 bind(L_restart);
2214
2215 // Fastest: receiver is already installed
2216 mov(offset, base_receiver_offset);
2217 bind(L_loop_search_receiver);
2218 ldr(rscratch1, Address(mdp, offset));
2219 cmp(rscratch1, recv);
2220 br(Assembler::EQ, L_found_recv);
2221 add(offset, offset, receiver_step);
2222 sub(rscratch1, offset, end_receiver_offset);
2223 cbnz(rscratch1, L_loop_search_receiver);
2224
2225 // Fast: no receiver, but profile is full
2226 mov(offset, base_receiver_offset);
2227 bind(L_loop_search_empty);
2228 ldr(rscratch1, Address(mdp, offset));
2229 cbz(rscratch1, L_found_empty);
2230 add(offset, offset, receiver_step);
2231 sub(rscratch1, offset, end_receiver_offset);
2232 cbnz(rscratch1, L_loop_search_empty);
2233 b(L_polymorphic);
2234
2235 // Slow: try to install receiver
2236 bind(L_found_empty);
2237
2238 // Atomically swing receiver slot: null -> recv.
2239 //
2240 // The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
2241 // is used to hold the destination address. This is safe because the
2242 // offset is no longer needed after the address is computed.
2243
2244 lea(rscratch2, Address(mdp, offset));
2245 cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
2246 /*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
2247
2248 // CAS success means the slot now has the receiver we want. CAS failure means
2249 // something had claimed the slot concurrently: it can be the same receiver we want,
2250 // or something else. Since this is a slow path, we can optimize for code density,
2251 // and just restart the search from the beginning.
2252 b(L_restart);
2253
2254 // Counter updates:
2255
2256 // Increment polymorphic counter instead of receiver slot.
2257 bind(L_polymorphic);
2258 mov(offset, poly_count_offset);
2259 b(L_count_update);
2260
2261 // Found a receiver, convert its slot offset to corresponding count offset.
2262 bind(L_found_recv);
2263 add(offset, offset, receiver_to_count_step);
2264
2265 bind(L_count_update);
2266 increment(Address(mdp, offset), DataLayout::counter_increment);
2267 }
2268
2269
2270 void MacroAssembler::call_VM_leaf_base(address entry_point,
2271 int number_of_arguments,
2272 Label *retaddr) {
2273 Label E, L;
2274
2275 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
2276
2277 mov(rscratch1, RuntimeAddress(entry_point));
2278 blr(rscratch1);
2279 if (retaddr)
2280 bind(*retaddr);
2281
2282 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
2283 }
2284
2285 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2286 call_VM_leaf_base(entry_point, number_of_arguments);
2287 }
2288
2289 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2290 pass_arg0(this, arg_0);
2291 call_VM_leaf_base(entry_point, 1);
2292 }
2293
2294 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2295 assert_different_registers(arg_1, c_rarg0);
2296 pass_arg0(this, arg_0);
2297 pass_arg1(this, arg_1);
2298 call_VM_leaf_base(entry_point, 2);
2299 }
2300
2301 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
2302 Register arg_1, Register arg_2) {
2303 assert_different_registers(arg_1, c_rarg0);
2304 assert_different_registers(arg_2, c_rarg0, c_rarg1);
2305 pass_arg0(this, arg_0);
2306 pass_arg1(this, arg_1);
2307 pass_arg2(this, arg_2);
2308 call_VM_leaf_base(entry_point, 3);
2309 }
2310
2311 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2312 pass_arg0(this, arg_0);
2313 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2314 }
2315
2316 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2317
2318 assert_different_registers(arg_0, c_rarg1);
2319 pass_arg1(this, arg_1);
2320 pass_arg0(this, arg_0);
2321 MacroAssembler::call_VM_leaf_base(entry_point, 2);
2322 }
2323
2324 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2325 assert_different_registers(arg_0, c_rarg1, c_rarg2);
2326 assert_different_registers(arg_1, c_rarg2);
2327 pass_arg2(this, arg_2);
2328 pass_arg1(this, arg_1);
2329 pass_arg0(this, arg_0);
2330 MacroAssembler::call_VM_leaf_base(entry_point, 3);
2331 }
2332
2333 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2334 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
2335 assert_different_registers(arg_1, c_rarg2, c_rarg3);
2336 assert_different_registers(arg_2, c_rarg3);
2337 pass_arg3(this, arg_3);
2338 pass_arg2(this, arg_2);
2339 pass_arg1(this, arg_1);
2340 pass_arg0(this, arg_0);
2341 MacroAssembler::call_VM_leaf_base(entry_point, 4);
2342 }
2343
2344 void MacroAssembler::null_check(Register reg, int offset) {
2345 if (needs_explicit_null_check(offset)) {
2346 // provoke OS null exception if reg is null by
2347 // accessing M[reg] w/o changing any registers
2348 // NOTE: this is plenty to provoke a segv
2349 ldr(zr, Address(reg));
2350 } else {
2351 // nothing to do, (later) access of M[reg + offset]
2352 // will provoke OS null exception if reg is null
2353 }
2354 }
2355
2356 // MacroAssembler protected routines needed to implement
2357 // public methods
2358
2359 void MacroAssembler::mov(Register r, Address dest) {
2360 code_section()->relocate(pc(), dest.rspec());
2361 uint64_t imm64 = (uint64_t)dest.target();
2362 movptr(r, imm64);
2363 }
2364
2365 // Move a constant pointer into r. In AArch64 mode the virtual
2366 // address space is 48 bits in size, so we only need three
2367 // instructions to create a patchable instruction sequence that can
2368 // reach anywhere.
2369 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
2370 #ifndef PRODUCT
2371 {
2372 char buffer[64];
2373 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
2374 block_comment(buffer);
2375 }
2376 #endif
2377 assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
2378 movz(r, imm64 & 0xffff);
2379 imm64 >>= 16;
2380 movk(r, imm64 & 0xffff, 16);
2381 imm64 >>= 16;
2382 movk(r, imm64 & 0xffff, 32);
2383 }
2384
2385 // Macro to mov replicated immediate to vector register.
2386 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
2387 // the upper 56/48/32 bits must be zeros for B/H/S type.
2388 // Vd will get the following values for different arrangements in T
2389 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh
2390 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh
2391 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh
2392 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh
2393 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
2394 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
2395 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh
2396 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh
2397 // Clobbers rscratch1
2398 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
2399 assert(T != T1Q, "unsupported");
2400 if (T == T1D || T == T2D) {
2401 int imm = operand_valid_for_movi_immediate(imm64, T);
2402 if (-1 != imm) {
2403 movi(Vd, T, imm);
2404 } else {
2405 mov(rscratch1, imm64);
2406 dup(Vd, T, rscratch1);
2407 }
2408 return;
2409 }
2410
2411 #ifdef ASSERT
2412 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
2413 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
2414 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
2415 #endif
2416 int shift = operand_valid_for_movi_immediate(imm64, T);
2417 uint32_t imm32 = imm64 & 0xffffffffULL;
2418 if (shift >= 0) {
2419 movi(Vd, T, (imm32 >> shift) & 0xff, shift);
2420 } else {
2421 movw(rscratch1, imm32);
2422 dup(Vd, T, rscratch1);
2423 }
2424 }
2425
2426 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
2427 {
2428 #ifndef PRODUCT
2429 {
2430 char buffer[64];
2431 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
2432 block_comment(buffer);
2433 }
2434 #endif
2435 if (operand_valid_for_logical_immediate(false, imm64)) {
2436 orr(dst, zr, imm64);
2437 } else {
2438 // we can use a combination of MOVZ or MOVN with
2439 // MOVK to build up the constant
2440 uint64_t imm_h[4];
2441 int zero_count = 0;
2442 int neg_count = 0;
2443 int i;
2444 for (i = 0; i < 4; i++) {
2445 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
2446 if (imm_h[i] == 0) {
2447 zero_count++;
2448 } else if (imm_h[i] == 0xffffL) {
2449 neg_count++;
2450 }
2451 }
2452 if (zero_count == 4) {
2453 // one MOVZ will do
2454 movz(dst, 0);
2455 } else if (neg_count == 4) {
2456 // one MOVN will do
2457 movn(dst, 0);
2458 } else if (zero_count == 3) {
2459 for (i = 0; i < 4; i++) {
2460 if (imm_h[i] != 0L) {
2461 movz(dst, (uint32_t)imm_h[i], (i << 4));
2462 break;
2463 }
2464 }
2465 } else if (neg_count == 3) {
2466 // one MOVN will do
2467 for (int i = 0; i < 4; i++) {
2468 if (imm_h[i] != 0xffffL) {
2469 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2470 break;
2471 }
2472 }
2473 } else if (zero_count == 2) {
2474 // one MOVZ and one MOVK will do
2475 for (i = 0; i < 3; i++) {
2476 if (imm_h[i] != 0L) {
2477 movz(dst, (uint32_t)imm_h[i], (i << 4));
2478 i++;
2479 break;
2480 }
2481 }
2482 for (;i < 4; i++) {
2483 if (imm_h[i] != 0L) {
2484 movk(dst, (uint32_t)imm_h[i], (i << 4));
2485 }
2486 }
2487 } else if (neg_count == 2) {
2488 // one MOVN and one MOVK will do
2489 for (i = 0; i < 4; i++) {
2490 if (imm_h[i] != 0xffffL) {
2491 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2492 i++;
2493 break;
2494 }
2495 }
2496 for (;i < 4; i++) {
2497 if (imm_h[i] != 0xffffL) {
2498 movk(dst, (uint32_t)imm_h[i], (i << 4));
2499 }
2500 }
2501 } else if (zero_count == 1) {
2502 // one MOVZ and two MOVKs will do
2503 for (i = 0; i < 4; i++) {
2504 if (imm_h[i] != 0L) {
2505 movz(dst, (uint32_t)imm_h[i], (i << 4));
2506 i++;
2507 break;
2508 }
2509 }
2510 for (;i < 4; i++) {
2511 if (imm_h[i] != 0x0L) {
2512 movk(dst, (uint32_t)imm_h[i], (i << 4));
2513 }
2514 }
2515 } else if (neg_count == 1) {
2516 // one MOVN and two MOVKs will do
2517 for (i = 0; i < 4; i++) {
2518 if (imm_h[i] != 0xffffL) {
2519 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2520 i++;
2521 break;
2522 }
2523 }
2524 for (;i < 4; i++) {
2525 if (imm_h[i] != 0xffffL) {
2526 movk(dst, (uint32_t)imm_h[i], (i << 4));
2527 }
2528 }
2529 } else {
2530 // use a MOVZ and 3 MOVKs (makes it easier to debug)
2531 movz(dst, (uint32_t)imm_h[0], 0);
2532 for (i = 1; i < 4; i++) {
2533 movk(dst, (uint32_t)imm_h[i], (i << 4));
2534 }
2535 }
2536 }
2537 }
2538
2539 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
2540 {
2541 #ifndef PRODUCT
2542 {
2543 char buffer[64];
2544 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
2545 block_comment(buffer);
2546 }
2547 #endif
2548 if (operand_valid_for_logical_immediate(true, imm32)) {
2549 orrw(dst, zr, imm32);
2550 } else {
2551 // we can use MOVZ, MOVN or two calls to MOVK to build up the
2552 // constant
2553 uint32_t imm_h[2];
2554 imm_h[0] = imm32 & 0xffff;
2555 imm_h[1] = ((imm32 >> 16) & 0xffff);
2556 if (imm_h[0] == 0) {
2557 movzw(dst, imm_h[1], 16);
2558 } else if (imm_h[0] == 0xffff) {
2559 movnw(dst, imm_h[1] ^ 0xffff, 16);
2560 } else if (imm_h[1] == 0) {
2561 movzw(dst, imm_h[0], 0);
2562 } else if (imm_h[1] == 0xffff) {
2563 movnw(dst, imm_h[0] ^ 0xffff, 0);
2564 } else {
2565 // use a MOVZ and MOVK (makes it easier to debug)
2566 movzw(dst, imm_h[0], 0);
2567 movkw(dst, imm_h[1], 16);
2568 }
2569 }
2570 }
2571
2572 // Form an address from base + offset in Rd. Rd may or may
2573 // not actually be used: you must use the Address that is returned.
2574 // It is up to you to ensure that the shift provided matches the size
2575 // of your data.
2576 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
2577 if (Address::offset_ok_for_immed(byte_offset, shift))
2578 // It fits; no need for any heroics
2579 return Address(base, byte_offset);
2580
2581 // Don't do anything clever with negative or misaligned offsets
2582 unsigned mask = (1 << shift) - 1;
2583 if (byte_offset < 0 || byte_offset & mask) {
2584 mov(Rd, byte_offset);
2585 add(Rd, base, Rd);
2586 return Address(Rd);
2587 }
2588
2589 // See if we can do this with two 12-bit offsets
2590 {
2591 uint64_t word_offset = byte_offset >> shift;
2592 uint64_t masked_offset = word_offset & 0xfff000;
2593 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
2594 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
2595 add(Rd, base, masked_offset << shift);
2596 word_offset -= masked_offset;
2597 return Address(Rd, word_offset << shift);
2598 }
2599 }
2600
2601 // Do it the hard way
2602 mov(Rd, byte_offset);
2603 add(Rd, base, Rd);
2604 return Address(Rd);
2605 }
2606
2607 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
2608 bool want_remainder, Register scratch)
2609 {
2610 // Full implementation of Java idiv and irem. The function
2611 // returns the (pc) offset of the div instruction - may be needed
2612 // for implicit exceptions.
2613 //
2614 // constraint : ra/rb =/= scratch
2615 // normal case
2616 //
2617 // input : ra: dividend
2618 // rb: divisor
2619 //
2620 // result: either
2621 // quotient (= ra idiv rb)
2622 // remainder (= ra irem rb)
2623
2624 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2625
2626 int idivl_offset = offset();
2627 if (! want_remainder) {
2628 sdivw(result, ra, rb);
2629 } else {
2630 sdivw(scratch, ra, rb);
2631 Assembler::msubw(result, scratch, rb, ra);
2632 }
2633
2634 return idivl_offset;
2635 }
2636
2637 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2638 bool want_remainder, Register scratch)
2639 {
2640 // Full implementation of Java ldiv and lrem. The function
2641 // returns the (pc) offset of the div instruction - may be needed
2642 // for implicit exceptions.
2643 //
2644 // constraint : ra/rb =/= scratch
2645 // normal case
2646 //
2647 // input : ra: dividend
2648 // rb: divisor
2649 //
2650 // result: either
2651 // quotient (= ra idiv rb)
2652 // remainder (= ra irem rb)
2653
2654 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2655
2656 int idivq_offset = offset();
2657 if (! want_remainder) {
2658 sdiv(result, ra, rb);
2659 } else {
2660 sdiv(scratch, ra, rb);
2661 Assembler::msub(result, scratch, rb, ra);
2662 }
2663
2664 return idivq_offset;
2665 }
2666
2667 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2668 address prev = pc() - NativeMembar::instruction_size;
2669 address last = code()->last_insn();
2670 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2671 NativeMembar *bar = NativeMembar_at(prev);
2672 if (AlwaysMergeDMB) {
2673 bar->set_kind(bar->get_kind() | order_constraint);
2674 BLOCK_COMMENT("merged membar(always)");
2675 return;
2676 }
2677 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2678 // doing so would introduce a StoreLoad which the caller did not
2679 // intend
2680 if (bar->get_kind() == order_constraint
2681 || bar->get_kind() == AnyAny
2682 || order_constraint == AnyAny) {
2683 // We are merging two memory barrier instructions. On AArch64 we
2684 // can do this simply by ORing them together.
2685 bar->set_kind(bar->get_kind() | order_constraint);
2686 BLOCK_COMMENT("merged membar");
2687 return;
2688 } else {
2689 // A special case like "DMB ST;DMB LD;DMB ST", the last DMB can be skipped
2690 // We need check the last 2 instructions
2691 address prev2 = prev - NativeMembar::instruction_size;
2692 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2693 NativeMembar *bar2 = NativeMembar_at(prev2);
2694 assert(bar2->get_kind() == order_constraint, "it should be merged before");
2695 BLOCK_COMMENT("merged membar(elided)");
2696 return;
2697 }
2698 }
2699 }
2700 code()->set_last_insn(pc());
2701 dmb(Assembler::barrier(order_constraint));
2702 }
2703
2704 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2705 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2706 merge_ldst(rt, adr, size_in_bytes, is_store);
2707 code()->clear_last_insn();
2708 return true;
2709 } else {
2710 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
2711 const uint64_t mask = size_in_bytes - 1;
2712 if (adr.getMode() == Address::base_plus_offset &&
2713 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2714 code()->set_last_insn(pc());
2715 }
2716 return false;
2717 }
2718 }
2719
2720 void MacroAssembler::ldr(Register Rx, const Address &adr) {
2721 // We always try to merge two adjacent loads into one ldp.
2722 if (!try_merge_ldst(Rx, adr, 8, false)) {
2723 Assembler::ldr(Rx, adr);
2724 }
2725 }
2726
2727 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2728 // We always try to merge two adjacent loads into one ldp.
2729 if (!try_merge_ldst(Rw, adr, 4, false)) {
2730 Assembler::ldrw(Rw, adr);
2731 }
2732 }
2733
2734 void MacroAssembler::str(Register Rx, const Address &adr) {
2735 // We always try to merge two adjacent stores into one stp.
2736 if (!try_merge_ldst(Rx, adr, 8, true)) {
2737 Assembler::str(Rx, adr);
2738 }
2739 }
2740
2741 void MacroAssembler::strw(Register Rw, const Address &adr) {
2742 // We always try to merge two adjacent stores into one stp.
2743 if (!try_merge_ldst(Rw, adr, 4, true)) {
2744 Assembler::strw(Rw, adr);
2745 }
2746 }
2747
2748 // MacroAssembler routines found actually to be needed
2749
2750 void MacroAssembler::push(Register src)
2751 {
2752 str(src, Address(pre(esp, -1 * wordSize)));
2753 }
2754
2755 void MacroAssembler::pop(Register dst)
2756 {
2757 ldr(dst, Address(post(esp, 1 * wordSize)));
2758 }
2759
2760 // Note: load_unsigned_short used to be called load_unsigned_word.
2761 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2762 int off = offset();
2763 ldrh(dst, src);
2764 return off;
2765 }
2766
2767 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2768 int off = offset();
2769 ldrb(dst, src);
2770 return off;
2771 }
2772
2773 int MacroAssembler::load_signed_short(Register dst, Address src) {
2774 int off = offset();
2775 ldrsh(dst, src);
2776 return off;
2777 }
2778
2779 int MacroAssembler::load_signed_byte(Register dst, Address src) {
2780 int off = offset();
2781 ldrsb(dst, src);
2782 return off;
2783 }
2784
2785 int MacroAssembler::load_signed_short32(Register dst, Address src) {
2786 int off = offset();
2787 ldrshw(dst, src);
2788 return off;
2789 }
2790
2791 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
2792 int off = offset();
2793 ldrsbw(dst, src);
2794 return off;
2795 }
2796
2797 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2798 switch (size_in_bytes) {
2799 case 8: ldr(dst, src); break;
2800 case 4: ldrw(dst, src); break;
2801 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2802 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2803 default: ShouldNotReachHere();
2804 }
2805 }
2806
2807 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) {
2808 switch (size_in_bytes) {
2809 case 8: str(src, dst); break;
2810 case 4: strw(src, dst); break;
2811 case 2: strh(src, dst); break;
2812 case 1: strb(src, dst); break;
2813 default: ShouldNotReachHere();
2814 }
2815 }
2816
2817 void MacroAssembler::decrementw(Register reg, int value)
2818 {
2819 if (value < 0) { incrementw(reg, -value); return; }
2820 if (value == 0) { return; }
2821 if (value < (1 << 12)) { subw(reg, reg, value); return; }
2822 /* else */ {
2823 guarantee(reg != rscratch2, "invalid dst for register decrement");
2824 movw(rscratch2, (unsigned)value);
2825 subw(reg, reg, rscratch2);
2826 }
2827 }
2828
2829 void MacroAssembler::decrement(Register reg, int value)
2830 {
2831 if (value < 0) { increment(reg, -value); return; }
2832 if (value == 0) { return; }
2833 if (value < (1 << 12)) { sub(reg, reg, value); return; }
2834 /* else */ {
2835 assert(reg != rscratch2, "invalid dst for register decrement");
2836 mov(rscratch2, (uint64_t)value);
2837 sub(reg, reg, rscratch2);
2838 }
2839 }
2840
2841 void MacroAssembler::decrementw(Address dst, int value)
2842 {
2843 assert(!dst.uses(rscratch1), "invalid dst for address decrement");
2844 if (dst.getMode() == Address::literal) {
2845 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2846 lea(rscratch2, dst);
2847 dst = Address(rscratch2);
2848 }
2849 ldrw(rscratch1, dst);
2850 decrementw(rscratch1, value);
2851 strw(rscratch1, dst);
2852 }
2853
2854 void MacroAssembler::decrement(Address dst, int value)
2855 {
2856 assert(!dst.uses(rscratch1), "invalid address for decrement");
2857 if (dst.getMode() == Address::literal) {
2858 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2859 lea(rscratch2, dst);
2860 dst = Address(rscratch2);
2861 }
2862 ldr(rscratch1, dst);
2863 decrement(rscratch1, value);
2864 str(rscratch1, dst);
2865 }
2866
2867 void MacroAssembler::incrementw(Register reg, int value)
2868 {
2869 if (value < 0) { decrementw(reg, -value); return; }
2870 if (value == 0) { return; }
2871 if (value < (1 << 12)) { addw(reg, reg, value); return; }
2872 /* else */ {
2873 assert(reg != rscratch2, "invalid dst for register increment");
2874 movw(rscratch2, (unsigned)value);
2875 addw(reg, reg, rscratch2);
2876 }
2877 }
2878
2879 void MacroAssembler::increment(Register reg, int value)
2880 {
2881 if (value < 0) { decrement(reg, -value); return; }
2882 if (value == 0) { return; }
2883 if (value < (1 << 12)) { add(reg, reg, value); return; }
2884 /* else */ {
2885 assert(reg != rscratch2, "invalid dst for register increment");
2886 movw(rscratch2, (unsigned)value);
2887 add(reg, reg, rscratch2);
2888 }
2889 }
2890
2891 void MacroAssembler::incrementw(Address dst, int value)
2892 {
2893 assert(!dst.uses(rscratch1), "invalid dst for address increment");
2894 if (dst.getMode() == Address::literal) {
2895 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2896 lea(rscratch2, dst);
2897 dst = Address(rscratch2);
2898 }
2899 ldrw(rscratch1, dst);
2900 incrementw(rscratch1, value);
2901 strw(rscratch1, dst);
2902 }
2903
2904 void MacroAssembler::increment(Address dst, int value)
2905 {
2906 assert(!dst.uses(rscratch1), "invalid dst for address increment");
2907 if (dst.getMode() == Address::literal) {
2908 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2909 lea(rscratch2, dst);
2910 dst = Address(rscratch2);
2911 }
2912 ldr(rscratch1, dst);
2913 increment(rscratch1, value);
2914 str(rscratch1, dst);
2915 }
2916
2917 // Push lots of registers in the bit set supplied. Don't push sp.
2918 // Return the number of words pushed
2919 int MacroAssembler::push(unsigned int bitset, Register stack) {
2920 int words_pushed = 0;
2921
2922 // Scan bitset to accumulate register pairs
2923 unsigned char regs[32];
2924 int count = 0;
2925 for (int reg = 0; reg <= 30; reg++) {
2926 if (1 & bitset)
2927 regs[count++] = reg;
2928 bitset >>= 1;
2929 }
2930 regs[count++] = zr->raw_encoding();
2931 count &= ~1; // Only push an even number of regs
2932
2933 if (count) {
2934 stp(as_Register(regs[0]), as_Register(regs[1]),
2935 Address(pre(stack, -count * wordSize)));
2936 words_pushed += 2;
2937 }
2938 for (int i = 2; i < count; i += 2) {
2939 stp(as_Register(regs[i]), as_Register(regs[i+1]),
2940 Address(stack, i * wordSize));
2941 words_pushed += 2;
2942 }
2943
2944 assert(words_pushed == count, "oops, pushed != count");
2945
2946 return count;
2947 }
2948
2949 int MacroAssembler::pop(unsigned int bitset, Register stack) {
2950 int words_pushed = 0;
2951
2952 // Scan bitset to accumulate register pairs
2953 unsigned char regs[32];
2954 int count = 0;
2955 for (int reg = 0; reg <= 30; reg++) {
2956 if (1 & bitset)
2957 regs[count++] = reg;
2958 bitset >>= 1;
2959 }
2960 regs[count++] = zr->raw_encoding();
2961 count &= ~1;
2962
2963 for (int i = 2; i < count; i += 2) {
2964 ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2965 Address(stack, i * wordSize));
2966 words_pushed += 2;
2967 }
2968 if (count) {
2969 ldp(as_Register(regs[0]), as_Register(regs[1]),
2970 Address(post(stack, count * wordSize)));
2971 words_pushed += 2;
2972 }
2973
2974 assert(words_pushed == count, "oops, pushed != count");
2975
2976 return count;
2977 }
2978
2979 // Push lots of registers in the bit set supplied. Don't push sp.
2980 // Return the number of dwords pushed
2981 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
2982 int words_pushed = 0;
2983 bool use_sve = false;
2984 int sve_vector_size_in_bytes = 0;
2985
2986 #ifdef COMPILER2
2987 use_sve = Matcher::supports_scalable_vector();
2988 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2989 #endif
2990
2991 // Scan bitset to accumulate register pairs
2992 unsigned char regs[32];
2993 int count = 0;
2994 for (int reg = 0; reg <= 31; reg++) {
2995 if (1 & bitset)
2996 regs[count++] = reg;
2997 bitset >>= 1;
2998 }
2999
3000 if (count == 0) {
3001 return 0;
3002 }
3003
3004 if (mode == PushPopFull) {
3005 if (use_sve && sve_vector_size_in_bytes > 16) {
3006 mode = PushPopSVE;
3007 } else {
3008 mode = PushPopNeon;
3009 }
3010 }
3011
3012 #ifndef PRODUCT
3013 {
3014 char buffer[48];
3015 if (mode == PushPopSVE) {
3016 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
3017 } else if (mode == PushPopNeon) {
3018 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
3019 } else {
3020 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
3021 }
3022 block_comment(buffer);
3023 }
3024 #endif
3025
3026 if (mode == PushPopSVE) {
3027 sub(stack, stack, sve_vector_size_in_bytes * count);
3028 for (int i = 0; i < count; i++) {
3029 sve_str(as_FloatRegister(regs[i]), Address(stack, i));
3030 }
3031 return count * sve_vector_size_in_bytes / 8;
3032 }
3033
3034 if (mode == PushPopNeon) {
3035 if (count == 1) {
3036 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
3037 return 2;
3038 }
3039
3040 bool odd = (count & 1) == 1;
3041 int push_slots = count + (odd ? 1 : 0);
3042
3043 // Always pushing full 128 bit registers.
3044 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
3045 words_pushed += 2;
3046
3047 for (int i = 2; i + 1 < count; i += 2) {
3048 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3049 words_pushed += 2;
3050 }
3051
3052 if (odd) {
3053 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3054 words_pushed++;
3055 }
3056
3057 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3058 return count * 2;
3059 }
3060
3061 if (mode == PushPopFp) {
3062 bool odd = (count & 1) == 1;
3063 int push_slots = count + (odd ? 1 : 0);
3064
3065 if (count == 1) {
3066 // Stack pointer must be 16 bytes aligned
3067 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize)));
3068 return 1;
3069 }
3070
3071 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize)));
3072 words_pushed += 2;
3073
3074 for (int i = 2; i + 1 < count; i += 2) {
3075 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3076 words_pushed += 2;
3077 }
3078
3079 if (odd) {
3080 // Stack pointer must be 16 bytes aligned
3081 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3082 words_pushed++;
3083 }
3084
3085 assert(words_pushed == count, "oops, pushed != count");
3086
3087 return count;
3088 }
3089
3090 return 0;
3091 }
3092
3093 // Return the number of dwords popped
3094 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
3095 int words_pushed = 0;
3096 bool use_sve = false;
3097 int sve_vector_size_in_bytes = 0;
3098
3099 #ifdef COMPILER2
3100 use_sve = Matcher::supports_scalable_vector();
3101 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
3102 #endif
3103 // Scan bitset to accumulate register pairs
3104 unsigned char regs[32];
3105 int count = 0;
3106 for (int reg = 0; reg <= 31; reg++) {
3107 if (1 & bitset)
3108 regs[count++] = reg;
3109 bitset >>= 1;
3110 }
3111
3112 if (count == 0) {
3113 return 0;
3114 }
3115
3116 if (mode == PushPopFull) {
3117 if (use_sve && sve_vector_size_in_bytes > 16) {
3118 mode = PushPopSVE;
3119 } else {
3120 mode = PushPopNeon;
3121 }
3122 }
3123
3124 #ifndef PRODUCT
3125 {
3126 char buffer[48];
3127 if (mode == PushPopSVE) {
3128 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
3129 } else if (mode == PushPopNeon) {
3130 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
3131 } else {
3132 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
3133 }
3134 block_comment(buffer);
3135 }
3136 #endif
3137
3138 if (mode == PushPopSVE) {
3139 for (int i = count - 1; i >= 0; i--) {
3140 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
3141 }
3142 add(stack, stack, sve_vector_size_in_bytes * count);
3143 return count * sve_vector_size_in_bytes / 8;
3144 }
3145
3146 if (mode == PushPopNeon) {
3147 if (count == 1) {
3148 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
3149 return 2;
3150 }
3151
3152 bool odd = (count & 1) == 1;
3153 int push_slots = count + (odd ? 1 : 0);
3154
3155 if (odd) {
3156 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3157 words_pushed++;
3158 }
3159
3160 for (int i = 2; i + 1 < count; i += 2) {
3161 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3162 words_pushed += 2;
3163 }
3164
3165 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
3166 words_pushed += 2;
3167
3168 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3169
3170 return count * 2;
3171 }
3172
3173 if (mode == PushPopFp) {
3174 bool odd = (count & 1) == 1;
3175 int push_slots = count + (odd ? 1 : 0);
3176
3177 if (count == 1) {
3178 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize)));
3179 return 1;
3180 }
3181
3182 if (odd) {
3183 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3184 words_pushed++;
3185 }
3186
3187 for (int i = 2; i + 1 < count; i += 2) {
3188 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3189 words_pushed += 2;
3190 }
3191
3192 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize)));
3193 words_pushed += 2;
3194
3195 assert(words_pushed == count, "oops, pushed != count");
3196
3197 return count;
3198 }
3199
3200 return 0;
3201 }
3202
3203 // Return the number of dwords pushed
3204 int MacroAssembler::push_p(unsigned int bitset, Register stack) {
3205 bool use_sve = false;
3206 int sve_predicate_size_in_slots = 0;
3207
3208 #ifdef COMPILER2
3209 use_sve = Matcher::supports_scalable_vector();
3210 if (use_sve) {
3211 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3212 }
3213 #endif
3214
3215 if (!use_sve) {
3216 return 0;
3217 }
3218
3219 unsigned char regs[PRegister::number_of_registers];
3220 int count = 0;
3221 for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3222 if (1 & bitset)
3223 regs[count++] = reg;
3224 bitset >>= 1;
3225 }
3226
3227 if (count == 0) {
3228 return 0;
3229 }
3230
3231 int total_push_bytes = align_up(sve_predicate_size_in_slots *
3232 VMRegImpl::stack_slot_size * count, 16);
3233 sub(stack, stack, total_push_bytes);
3234 for (int i = 0; i < count; i++) {
3235 sve_str(as_PRegister(regs[i]), Address(stack, i));
3236 }
3237 return total_push_bytes / 8;
3238 }
3239
3240 // Return the number of dwords popped
3241 int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
3242 bool use_sve = false;
3243 int sve_predicate_size_in_slots = 0;
3244
3245 #ifdef COMPILER2
3246 use_sve = Matcher::supports_scalable_vector();
3247 if (use_sve) {
3248 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3249 }
3250 #endif
3251
3252 if (!use_sve) {
3253 return 0;
3254 }
3255
3256 unsigned char regs[PRegister::number_of_registers];
3257 int count = 0;
3258 for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3259 if (1 & bitset)
3260 regs[count++] = reg;
3261 bitset >>= 1;
3262 }
3263
3264 if (count == 0) {
3265 return 0;
3266 }
3267
3268 int total_pop_bytes = align_up(sve_predicate_size_in_slots *
3269 VMRegImpl::stack_slot_size * count, 16);
3270 for (int i = count - 1; i >= 0; i--) {
3271 sve_ldr(as_PRegister(regs[i]), Address(stack, i));
3272 }
3273 add(stack, stack, total_pop_bytes);
3274 return total_pop_bytes / 8;
3275 }
3276
3277 #ifdef ASSERT
3278 void MacroAssembler::verify_heapbase(const char* msg) {
3279 #if 0
3280 assert (Universe::heap() != nullptr, "java heap should be initialized");
3281 if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
3282 // rheapbase is allocated as general register
3283 return;
3284 }
3285 if (CheckCompressedOops) {
3286 Label ok;
3287 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
3288 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3289 br(Assembler::EQ, ok);
3290 stop(msg);
3291 bind(ok);
3292 pop(1 << rscratch1->encoding(), sp);
3293 }
3294 #endif
3295 }
3296 #endif
3297
3298 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3299 assert_different_registers(value, tmp1, tmp2);
3300 Label done, tagged, weak_tagged;
3301
3302 cbz(value, done); // Use null as-is.
3303 tst(value, JNIHandles::tag_mask); // Test for tag.
3304 br(Assembler::NE, tagged);
3305
3306 // Resolve local handle
3307 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
3308 verify_oop(value);
3309 b(done);
3310
3311 bind(tagged);
3312 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
3313 tbnz(value, 0, weak_tagged); // Test for weak tag.
3314
3315 // Resolve global handle
3316 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3317 verify_oop(value);
3318 b(done);
3319
3320 bind(weak_tagged);
3321 // Resolve jweak.
3322 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3323 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2);
3324 verify_oop(value);
3325
3326 bind(done);
3327 }
3328
3329 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) {
3330 assert_different_registers(value, tmp1, tmp2);
3331 Label done;
3332
3333 cbz(value, done); // Use null as-is.
3334
3335 #ifdef ASSERT
3336 {
3337 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3338 Label valid_global_tag;
3339 tbnz(value, 1, valid_global_tag); // Test for global tag
3340 stop("non global jobject using resolve_global_jobject");
3341 bind(valid_global_tag);
3342 }
3343 #endif
3344
3345 // Resolve global handle
3346 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3347 verify_oop(value);
3348
3349 bind(done);
3350 }
3351
3352 void MacroAssembler::stop(const char* msg) {
3353 // Skip AOT caching C strings in scratch buffer.
3354 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
3355 BLOCK_COMMENT(str);
3356 // load msg into r0 so we can access it from the signal handler
3357 // ExternalAddress enables saving and restoring via the code cache
3358 lea(c_rarg0, ExternalAddress((address) str));
3359 dcps1(0xdeae);
3360 }
3361
3362 void MacroAssembler::unimplemented(const char* what) {
3363 const char* buf = nullptr;
3364 {
3365 ResourceMark rm;
3366 stringStream ss;
3367 ss.print("unimplemented: %s", what);
3368 buf = code_string(ss.as_string());
3369 }
3370 stop(buf);
3371 }
3372
3373 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3374 #ifdef ASSERT
3375 Label OK;
3376 br(cc, OK);
3377 stop(msg);
3378 bind(OK);
3379 #endif
3380 }
3381
3382 // If a constant does not fit in an immediate field, generate some
3383 // number of MOV instructions and then perform the operation.
3384 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
3385 add_sub_imm_insn insn1,
3386 add_sub_reg_insn insn2,
3387 bool is32) {
3388 assert(Rd != zr, "Rd = zr and not setting flags?");
3389 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3390 if (fits) {
3391 (this->*insn1)(Rd, Rn, imm);
3392 } else {
3393 if (g_uabs(imm) < (1 << 24)) {
3394 (this->*insn1)(Rd, Rn, imm & -(1 << 12));
3395 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
3396 } else {
3397 assert_different_registers(Rd, Rn);
3398 mov(Rd, imm);
3399 (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3400 }
3401 }
3402 }
3403
3404 // Separate vsn which sets the flags. Optimisations are more restricted
3405 // because we must set the flags correctly.
3406 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
3407 add_sub_imm_insn insn1,
3408 add_sub_reg_insn insn2,
3409 bool is32) {
3410 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3411 if (fits) {
3412 (this->*insn1)(Rd, Rn, imm);
3413 } else {
3414 assert_different_registers(Rd, Rn);
3415 assert(Rd != zr, "overflow in immediate operand");
3416 mov(Rd, imm);
3417 (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3418 }
3419 }
3420
3421
3422 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
3423 if (increment.is_register()) {
3424 add(Rd, Rn, increment.as_register());
3425 } else {
3426 add(Rd, Rn, increment.as_constant());
3427 }
3428 }
3429
3430 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
3431 if (increment.is_register()) {
3432 addw(Rd, Rn, increment.as_register());
3433 } else {
3434 addw(Rd, Rn, increment.as_constant());
3435 }
3436 }
3437
3438 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3439 if (decrement.is_register()) {
3440 sub(Rd, Rn, decrement.as_register());
3441 } else {
3442 sub(Rd, Rn, decrement.as_constant());
3443 }
3444 }
3445
3446 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3447 if (decrement.is_register()) {
3448 subw(Rd, Rn, decrement.as_register());
3449 } else {
3450 subw(Rd, Rn, decrement.as_constant());
3451 }
3452 }
3453
3454 void MacroAssembler::reinit_heapbase()
3455 {
3456 if (UseCompressedOops) {
3457 if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
3458 mov(rheapbase, CompressedOops::base());
3459 } else {
3460 lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3461 ldr(rheapbase, Address(rheapbase));
3462 }
3463 }
3464 }
3465
3466 // A generic CAS; success or failure is in the EQ flag. A weak CAS
3467 // doesn't retry and may fail spuriously. If the oldval is wanted,
3468 // Pass a register for the result, otherwise pass noreg.
3469
3470 // Clobbers rscratch1
3471 void MacroAssembler::cmpxchg(Register addr, Register expected,
3472 Register new_val,
3473 enum operand_size size,
3474 bool acquire, bool release,
3475 bool weak,
3476 Register result) {
3477 if (result == noreg) result = rscratch1;
3478 BLOCK_COMMENT("cmpxchg {");
3479 if (UseLSE) {
3480 mov(result, expected);
3481 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3482 compare_eq(result, expected, size);
3483 #ifdef ASSERT
3484 // Poison rscratch1 which is written on !UseLSE branch
3485 mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3486 #endif
3487 } else {
3488 Label retry_load, done;
3489 prfm(Address(addr), PSTL1STRM);
3490 bind(retry_load);
3491 load_exclusive(result, addr, size, acquire);
3492 compare_eq(result, expected, size);
3493 br(Assembler::NE, done);
3494 store_exclusive(rscratch1, new_val, addr, size, release);
3495 if (weak) {
3496 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3497 } else {
3498 cbnzw(rscratch1, retry_load);
3499 }
3500 bind(done);
3501 }
3502 BLOCK_COMMENT("} cmpxchg");
3503 }
3504
3505 // A generic comparison. Only compares for equality, clobbers rscratch1.
3506 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
3507 if (size == xword) {
3508 cmp(rm, rn);
3509 } else if (size == word) {
3510 cmpw(rm, rn);
3511 } else if (size == halfword) {
3512 eorw(rscratch1, rm, rn);
3513 ands(zr, rscratch1, 0xffff);
3514 } else if (size == byte) {
3515 eorw(rscratch1, rm, rn);
3516 ands(zr, rscratch1, 0xff);
3517 } else {
3518 ShouldNotReachHere();
3519 }
3520 }
3521
3522
3523 static bool different(Register a, RegisterOrConstant b, Register c) {
3524 if (b.is_constant())
3525 return a != c;
3526 else
3527 return a != b.as_register() && a != c && b.as_register() != c;
3528 }
3529
3530 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
3531 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
3532 if (UseLSE) { \
3533 prev = prev->is_valid() ? prev : zr; \
3534 if (incr.is_register()) { \
3535 AOP(sz, incr.as_register(), prev, addr); \
3536 } else { \
3537 mov(rscratch2, incr.as_constant()); \
3538 AOP(sz, rscratch2, prev, addr); \
3539 } \
3540 return; \
3541 } \
3542 Register result = rscratch2; \
3543 if (prev->is_valid()) \
3544 result = different(prev, incr, addr) ? prev : rscratch2; \
3545 \
3546 Label retry_load; \
3547 prfm(Address(addr), PSTL1STRM); \
3548 bind(retry_load); \
3549 LDXR(result, addr); \
3550 OP(rscratch1, result, incr); \
3551 STXR(rscratch2, rscratch1, addr); \
3552 cbnzw(rscratch2, retry_load); \
3553 if (prev->is_valid() && prev != result) { \
3554 IOP(prev, rscratch1, incr); \
3555 } \
3556 }
3557
3558 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
3559 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
3560 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
3561 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
3562
3563 #undef ATOMIC_OP
3564
3565 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \
3566 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
3567 if (UseLSE) { \
3568 prev = prev->is_valid() ? prev : zr; \
3569 AOP(sz, newv, prev, addr); \
3570 return; \
3571 } \
3572 Register result = rscratch2; \
3573 if (prev->is_valid()) \
3574 result = different(prev, newv, addr) ? prev : rscratch2; \
3575 \
3576 Label retry_load; \
3577 prfm(Address(addr), PSTL1STRM); \
3578 bind(retry_load); \
3579 LDXR(result, addr); \
3580 STXR(rscratch1, newv, addr); \
3581 cbnzw(rscratch1, retry_load); \
3582 if (prev->is_valid() && prev != result) \
3583 mov(prev, result); \
3584 }
3585
3586 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3587 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3588 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3589 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3590 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3591 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3592
3593 #undef ATOMIC_XCHG
3594
3595 #ifndef PRODUCT
3596 extern "C" void findpc(intptr_t x);
3597 #endif
3598
3599 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3600 {
3601 // In order to get locks to work, we need to fake a in_VM state
3602 if (ShowMessageBoxOnError) {
3603 JavaThread* thread = JavaThread::current();
3604 thread->set_thread_state(_thread_in_vm);
3605 #ifndef PRODUCT
3606 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3607 ttyLocker ttyl;
3608 BytecodeCounter::print();
3609 }
3610 #endif
3611 if (os::message_box(msg, "Execution stopped, print registers?")) {
3612 ttyLocker ttyl;
3613 tty->print_cr(" pc = 0x%016" PRIx64, pc);
3614 #ifndef PRODUCT
3615 tty->cr();
3616 findpc(pc);
3617 tty->cr();
3618 #endif
3619 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3620 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3621 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3622 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3623 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3624 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3625 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3626 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3627 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3628 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3629 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3630 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3631 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3632 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3633 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3634 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3635 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3636 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3637 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3638 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3639 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3640 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3641 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3642 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3643 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3644 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3645 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3646 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3647 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3648 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3649 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3650 BREAKPOINT;
3651 }
3652 }
3653 fatal("DEBUG MESSAGE: %s", msg);
3654 }
3655
3656 RegSet MacroAssembler::call_clobbered_gp_registers() {
3657 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3658 #ifndef R18_RESERVED
3659 regs += r18_tls;
3660 #endif
3661 return regs;
3662 }
3663
3664 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3665 int step = 4 * wordSize;
3666 push(call_clobbered_gp_registers() - exclude, sp);
3667 sub(sp, sp, step);
3668 mov(rscratch1, -step);
3669 // Push v0-v7, v16-v31.
3670 for (int i = 31; i>= 4; i -= 4) {
3671 if (i <= v7->encoding() || i >= v16->encoding())
3672 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
3673 as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
3674 }
3675 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
3676 as_FloatRegister(3), T1D, Address(sp));
3677 }
3678
3679 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
3680 for (int i = 0; i < 32; i += 4) {
3681 if (i <= v7->encoding() || i >= v16->encoding())
3682 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3683 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
3684 }
3685
3686 reinitialize_ptrue();
3687
3688 pop(call_clobbered_gp_registers() - exclude, sp);
3689 }
3690
3691 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
3692 int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3693 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
3694 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3695 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3696 for (int i = 0; i < FloatRegister::number_of_registers; i++) {
3697 sve_str(as_FloatRegister(i), Address(sp, i));
3698 }
3699 } else {
3700 int step = (save_vectors ? 8 : 4) * wordSize;
3701 mov(rscratch1, -step);
3702 sub(sp, sp, step);
3703 for (int i = 28; i >= 4; i -= 4) {
3704 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3705 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
3706 }
3707 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
3708 }
3709 if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
3710 sub(sp, sp, total_predicate_in_bytes);
3711 for (int i = 0; i < PRegister::number_of_registers; i++) {
3712 sve_str(as_PRegister(i), Address(sp, i));
3713 }
3714 }
3715 }
3716
3717 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
3718 int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3719 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
3720 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
3721 sve_ldr(as_PRegister(i), Address(sp, i));
3722 }
3723 add(sp, sp, total_predicate_in_bytes);
3724 }
3725 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3726 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
3727 sve_ldr(as_FloatRegister(i), Address(sp, i));
3728 }
3729 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3730 } else {
3731 int step = (restore_vectors ? 8 : 4) * wordSize;
3732 for (int i = 0; i <= 28; i += 4)
3733 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3734 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
3735 }
3736
3737 // We may use predicate registers and rely on ptrue with SVE,
3738 // regardless of wide vector (> 8 bytes) used or not.
3739 if (use_sve) {
3740 reinitialize_ptrue();
3741 }
3742
3743 // integer registers except lr & sp
3744 pop(RegSet::range(r0, r17), sp);
3745 #ifdef R18_RESERVED
3746 ldp(zr, r19, Address(post(sp, 2 * wordSize)));
3747 pop(RegSet::range(r20, r29), sp);
3748 #else
3749 pop(RegSet::range(r18_tls, r29), sp);
3750 #endif
3751 }
3752
3753 /**
3754 * Helpers for multiply_to_len().
3755 */
3756 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3757 Register src1, Register src2) {
3758 adds(dest_lo, dest_lo, src1);
3759 adc(dest_hi, dest_hi, zr);
3760 adds(dest_lo, dest_lo, src2);
3761 adc(final_dest_hi, dest_hi, zr);
3762 }
3763
3764 // Generate an address from (r + r1 extend offset). "size" is the
3765 // size of the operand. The result may be in rscratch2.
3766 Address MacroAssembler::offsetted_address(Register r, Register r1,
3767 Address::extend ext, int offset, int size) {
3768 if (offset || (ext.shift() % size != 0)) {
3769 lea(rscratch2, Address(r, r1, ext));
3770 return Address(rscratch2, offset);
3771 } else {
3772 return Address(r, r1, ext);
3773 }
3774 }
3775
3776 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3777 {
3778 assert(offset >= 0, "spill to negative address?");
3779 // Offset reachable ?
3780 // Not aligned - 9 bits signed offset
3781 // Aligned - 12 bits unsigned offset shifted
3782 Register base = sp;
3783 if ((offset & (size-1)) && offset >= (1<<8)) {
3784 add(tmp, base, offset & ((1<<12)-1));
3785 base = tmp;
3786 offset &= -1u<<12;
3787 }
3788
3789 if (offset >= (1<<12) * size) {
3790 add(tmp, base, offset & (((1<<12)-1)<<12));
3791 base = tmp;
3792 offset &= ~(((1<<12)-1)<<12);
3793 }
3794
3795 return Address(base, offset);
3796 }
3797
3798 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3799 assert(offset >= 0, "spill to negative address?");
3800
3801 Register base = sp;
3802
3803 // An immediate offset in the range 0 to 255 which is multiplied
3804 // by the current vector or predicate register size in bytes.
3805 if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3806 return Address(base, offset / sve_reg_size_in_bytes);
3807 }
3808
3809 add(tmp, base, offset);
3810 return Address(tmp);
3811 }
3812
3813 // Checks whether offset is aligned.
3814 // Returns true if it is, else false.
3815 bool MacroAssembler::merge_alignment_check(Register base,
3816 size_t size,
3817 int64_t cur_offset,
3818 int64_t prev_offset) const {
3819 if (AvoidUnalignedAccesses) {
3820 if (base == sp) {
3821 // Checks whether low offset if aligned to pair of registers.
3822 int64_t pair_mask = size * 2 - 1;
3823 int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3824 return (offset & pair_mask) == 0;
3825 } else { // If base is not sp, we can't guarantee the access is aligned.
3826 return false;
3827 }
3828 } else {
3829 int64_t mask = size - 1;
3830 // Load/store pair instruction only supports element size aligned offset.
3831 return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3832 }
3833 }
3834
3835 // Checks whether current and previous loads/stores can be merged.
3836 // Returns true if it can be merged, else false.
3837 bool MacroAssembler::ldst_can_merge(Register rt,
3838 const Address &adr,
3839 size_t cur_size_in_bytes,
3840 bool is_store) const {
3841 address prev = pc() - NativeInstruction::instruction_size;
3842 address last = code()->last_insn();
3843
3844 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3845 return false;
3846 }
3847
3848 if (adr.getMode() != Address::base_plus_offset || prev != last) {
3849 return false;
3850 }
3851
3852 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3853 size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3854
3855 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3856 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3857
3858 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3859 return false;
3860 }
3861
3862 int64_t max_offset = 63 * prev_size_in_bytes;
3863 int64_t min_offset = -64 * prev_size_in_bytes;
3864
3865 assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
3866
3867 // Only same base can be merged.
3868 if (adr.base() != prev_ldst->base()) {
3869 return false;
3870 }
3871
3872 int64_t cur_offset = adr.offset();
3873 int64_t prev_offset = prev_ldst->offset();
3874 size_t diff = abs(cur_offset - prev_offset);
3875 if (diff != prev_size_in_bytes) {
3876 return false;
3877 }
3878
3879 // Following cases can not be merged:
3880 // ldr x2, [x2, #8]
3881 // ldr x3, [x2, #16]
3882 // or:
3883 // ldr x2, [x3, #8]
3884 // ldr x2, [x3, #16]
3885 // If t1 and t2 is the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3886 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3887 return false;
3888 }
3889
3890 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3891 // Offset range must be in ldp/stp instruction's range.
3892 if (low_offset > max_offset || low_offset < min_offset) {
3893 return false;
3894 }
3895
3896 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3897 return true;
3898 }
3899
3900 return false;
3901 }
3902
3903 // Merge current load/store with previous load/store into ldp/stp.
3904 void MacroAssembler::merge_ldst(Register rt,
3905 const Address &adr,
3906 size_t cur_size_in_bytes,
3907 bool is_store) {
3908
3909 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3910
3911 Register rt_low, rt_high;
3912 address prev = pc() - NativeInstruction::instruction_size;
3913 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3914
3915 int64_t offset;
3916
3917 if (adr.offset() < prev_ldst->offset()) {
3918 offset = adr.offset();
3919 rt_low = rt;
3920 rt_high = prev_ldst->target();
3921 } else {
3922 offset = prev_ldst->offset();
3923 rt_low = prev_ldst->target();
3924 rt_high = rt;
3925 }
3926
3927 Address adr_p = Address(prev_ldst->base(), offset);
3928 // Overwrite previous generated binary.
3929 code_section()->set_end(prev);
3930
3931 const size_t sz = prev_ldst->size_in_bytes();
3932 assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3933 if (!is_store) {
3934 BLOCK_COMMENT("merged ldr pair");
3935 if (sz == 8) {
3936 ldp(rt_low, rt_high, adr_p);
3937 } else {
3938 ldpw(rt_low, rt_high, adr_p);
3939 }
3940 } else {
3941 BLOCK_COMMENT("merged str pair");
3942 if (sz == 8) {
3943 stp(rt_low, rt_high, adr_p);
3944 } else {
3945 stpw(rt_low, rt_high, adr_p);
3946 }
3947 }
3948 }
3949
3950 /**
3951 * Multiply 64 bit by 64 bit first loop.
3952 */
3953 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3954 Register y, Register y_idx, Register z,
3955 Register carry, Register product,
3956 Register idx, Register kdx) {
3957 //
3958 // jlong carry, x[], y[], z[];
3959 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) {
3960 // huge_128 product = y[idx] * x[xstart] + carry;
3961 // z[kdx] = (jlong)product;
3962 // carry = (jlong)(product >>> 64);
3963 // }
3964 // z[xstart] = carry;
3965 //
3966
3967 Label L_first_loop, L_first_loop_exit;
3968 Label L_one_x, L_one_y, L_multiply;
3969
3970 subsw(xstart, xstart, 1);
3971 br(Assembler::MI, L_one_x);
3972
3973 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3974 ldr(x_xstart, Address(rscratch1));
3975 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3976
3977 bind(L_first_loop);
3978 subsw(idx, idx, 1);
3979 br(Assembler::MI, L_first_loop_exit);
3980 subsw(idx, idx, 1);
3981 br(Assembler::MI, L_one_y);
3982 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3983 ldr(y_idx, Address(rscratch1));
3984 ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3985 bind(L_multiply);
3986
3987 // AArch64 has a multiply-accumulate instruction that we can't use
3988 // here because it has no way to process carries, so we have to use
3989 // separate add and adc instructions. Bah.
3990 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3991 mul(product, x_xstart, y_idx);
3992 adds(product, product, carry);
3993 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
3994
3995 subw(kdx, kdx, 2);
3996 ror(product, product, 32); // back to big-endian
3997 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3998
3999 b(L_first_loop);
4000
4001 bind(L_one_y);
4002 ldrw(y_idx, Address(y, 0));
4003 b(L_multiply);
4004
4005 bind(L_one_x);
4006 ldrw(x_xstart, Address(x, 0));
4007 b(L_first_loop);
4008
4009 bind(L_first_loop_exit);
4010 }
4011
4012 /**
4013 * Multiply 128 bit by 128. Unrolled inner loop.
4014 *
4015 */
4016 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
4017 Register carry, Register carry2,
4018 Register idx, Register jdx,
4019 Register yz_idx1, Register yz_idx2,
4020 Register tmp, Register tmp3, Register tmp4,
4021 Register tmp6, Register product_hi) {
4022
4023 // jlong carry, x[], y[], z[];
4024 // int kdx = ystart+1;
4025 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
4026 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
4027 // jlong carry2 = (jlong)(tmp3 >>> 64);
4028 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2;
4029 // carry = (jlong)(tmp4 >>> 64);
4030 // z[kdx+idx+1] = (jlong)tmp3;
4031 // z[kdx+idx] = (jlong)tmp4;
4032 // }
4033 // idx += 2;
4034 // if (idx > 0) {
4035 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
4036 // z[kdx+idx] = (jlong)yz_idx1;
4037 // carry = (jlong)(yz_idx1 >>> 64);
4038 // }
4039 //
4040
4041 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
4042
4043 lsrw(jdx, idx, 2);
4044
4045 bind(L_third_loop);
4046
4047 subsw(jdx, jdx, 1);
4048 br(Assembler::MI, L_third_loop_exit);
4049 subw(idx, idx, 4);
4050
4051 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4052
4053 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
4054
4055 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4056
4057 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
4058 ror(yz_idx2, yz_idx2, 32);
4059
4060 ldp(rscratch2, rscratch1, Address(tmp6, 0));
4061
4062 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
4063 umulh(tmp4, product_hi, yz_idx1);
4064
4065 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
4066 ror(rscratch2, rscratch2, 32);
4067
4068 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp
4069 umulh(carry2, product_hi, yz_idx2);
4070
4071 // propagate sum of both multiplications into carry:tmp4:tmp3
4072 adds(tmp3, tmp3, carry);
4073 adc(tmp4, tmp4, zr);
4074 adds(tmp3, tmp3, rscratch1);
4075 adcs(tmp4, tmp4, tmp);
4076 adc(carry, carry2, zr);
4077 adds(tmp4, tmp4, rscratch2);
4078 adc(carry, carry, zr);
4079
4080 ror(tmp3, tmp3, 32); // convert little-endian to big-endian
4081 ror(tmp4, tmp4, 32);
4082 stp(tmp4, tmp3, Address(tmp6, 0));
4083
4084 b(L_third_loop);
4085 bind (L_third_loop_exit);
4086
4087 andw (idx, idx, 0x3);
4088 cbz(idx, L_post_third_loop_done);
4089
4090 Label L_check_1;
4091 subsw(idx, idx, 2);
4092 br(Assembler::MI, L_check_1);
4093
4094 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4095 ldr(yz_idx1, Address(rscratch1, 0));
4096 ror(yz_idx1, yz_idx1, 32);
4097 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
4098 umulh(tmp4, product_hi, yz_idx1);
4099 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4100 ldr(yz_idx2, Address(rscratch1, 0));
4101 ror(yz_idx2, yz_idx2, 32);
4102
4103 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
4104
4105 ror(tmp3, tmp3, 32);
4106 str(tmp3, Address(rscratch1, 0));
4107
4108 bind (L_check_1);
4109
4110 andw (idx, idx, 0x1);
4111 subsw(idx, idx, 1);
4112 br(Assembler::MI, L_post_third_loop_done);
4113 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4114 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3
4115 umulh(carry2, tmp4, product_hi);
4116 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4117
4118 add2_with_carry(carry2, tmp3, tmp4, carry);
4119
4120 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4121 extr(carry, carry2, tmp3, 32);
4122
4123 bind(L_post_third_loop_done);
4124 }
4125
4126 /**
4127 * Code for BigInteger::multiplyToLen() intrinsic.
4128 *
4129 * r0: x
4130 * r1: xlen
4131 * r2: y
4132 * r3: ylen
4133 * r4: z
4134 * r5: tmp0
4135 * r10: tmp1
4136 * r11: tmp2
4137 * r12: tmp3
4138 * r13: tmp4
4139 * r14: tmp5
4140 * r15: tmp6
4141 * r16: tmp7
4142 *
4143 */
4144 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
4145 Register z, Register tmp0,
4146 Register tmp1, Register tmp2, Register tmp3, Register tmp4,
4147 Register tmp5, Register tmp6, Register product_hi) {
4148
4149 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);
4150
4151 const Register idx = tmp1;
4152 const Register kdx = tmp2;
4153 const Register xstart = tmp3;
4154
4155 const Register y_idx = tmp4;
4156 const Register carry = tmp5;
4157 const Register product = xlen;
4158 const Register x_xstart = tmp0;
4159
4160 // First Loop.
4161 //
4162 // final static long LONG_MASK = 0xffffffffL;
4163 // int xstart = xlen - 1;
4164 // int ystart = ylen - 1;
4165 // long carry = 0;
4166 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) {
4167 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
4168 // z[kdx] = (int)product;
4169 // carry = product >>> 32;
4170 // }
4171 // z[xstart] = (int)carry;
4172 //
4173
4174 movw(idx, ylen); // idx = ylen;
4175 addw(kdx, xlen, ylen); // kdx = xlen+ylen;
4176 mov(carry, zr); // carry = 0;
4177
4178 Label L_done;
4179
4180 movw(xstart, xlen);
4181 subsw(xstart, xstart, 1);
4182 br(Assembler::MI, L_done);
4183
4184 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
4185
4186 Label L_second_loop;
4187 cbzw(kdx, L_second_loop);
4188
4189 Label L_carry;
4190 subw(kdx, kdx, 1);
4191 cbzw(kdx, L_carry);
4192
4193 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4194 lsr(carry, carry, 32);
4195 subw(kdx, kdx, 1);
4196
4197 bind(L_carry);
4198 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4199
4200 // Second and third (nested) loops.
4201 //
4202 // for (int i = xstart-1; i >= 0; i--) { // Second loop
4203 // carry = 0;
4204 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
4205 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
4206 // (z[k] & LONG_MASK) + carry;
4207 // z[k] = (int)product;
4208 // carry = product >>> 32;
4209 // }
4210 // z[i] = (int)carry;
4211 // }
4212 //
4213 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
4214
4215 const Register jdx = tmp1;
4216
4217 bind(L_second_loop);
4218 mov(carry, zr); // carry = 0;
4219 movw(jdx, ylen); // j = ystart+1
4220
4221 subsw(xstart, xstart, 1); // i = xstart-1;
4222 br(Assembler::MI, L_done);
4223
4224 str(z, Address(pre(sp, -4 * wordSize)));
4225
4226 Label L_last_x;
4227 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
4228 subsw(xstart, xstart, 1); // i = xstart-1;
4229 br(Assembler::MI, L_last_x);
4230
4231 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
4232 ldr(product_hi, Address(rscratch1));
4233 ror(product_hi, product_hi, 32); // convert big-endian to little-endian
4234
4235 Label L_third_loop_prologue;
4236 bind(L_third_loop_prologue);
4237
4238 str(ylen, Address(sp, wordSize));
4239 stp(x, xstart, Address(sp, 2 * wordSize));
4240 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
4241 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
4242 ldp(z, ylen, Address(post(sp, 2 * wordSize)));
4243 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen
4244
4245 addw(tmp3, xlen, 1);
4246 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4247 subsw(tmp3, tmp3, 1);
4248 br(Assembler::MI, L_done);
4249
4250 lsr(carry, carry, 32);
4251 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4252 b(L_second_loop);
4253
4254 // Next infrequent code is moved outside loops.
4255 bind(L_last_x);
4256 ldrw(product_hi, Address(x, 0));
4257 b(L_third_loop_prologue);
4258
4259 bind(L_done);
4260 }
4261
4262 // Code for BigInteger::mulAdd intrinsic
4263 // out = r0
4264 // in = r1
4265 // offset = r2 (already out.length-offset)
4266 // len = r3
4267 // k = r4
4268 //
4269 // pseudo code from java implementation:
4270 // carry = 0;
4271 // offset = out.length-offset - 1;
4272 // for (int j=len-1; j >= 0; j--) {
4273 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
4274 // out[offset--] = (int)product;
4275 // carry = product >>> 32;
4276 // }
4277 // return (int)carry;
4278 void MacroAssembler::mul_add(Register out, Register in, Register offset,
4279 Register len, Register k) {
4280 Label LOOP, END;
4281 // pre-loop
4282 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches
4283 csel(out, zr, out, Assembler::EQ);
4284 br(Assembler::EQ, END);
4285 add(in, in, len, LSL, 2); // in[j+1] address
4286 add(offset, out, offset, LSL, 2); // out[offset + 1] address
4287 mov(out, zr); // used to keep carry now
4288 BIND(LOOP);
4289 ldrw(rscratch1, Address(pre(in, -4)));
4290 madd(rscratch1, rscratch1, k, out);
4291 ldrw(rscratch2, Address(pre(offset, -4)));
4292 add(rscratch1, rscratch1, rscratch2);
4293 strw(rscratch1, Address(offset));
4294 lsr(out, rscratch1, 32);
4295 subs(len, len, 1);
4296 br(Assembler::NE, LOOP);
4297 BIND(END);
4298 }
4299
4300 /**
4301 * Emits code to update CRC-32 with a byte value according to constants in table
4302 *
4303 * @param [in,out]crc Register containing the crc.
4304 * @param [in]val Register containing the byte to fold into the CRC.
4305 * @param [in]table Register containing the table of crc constants.
4306 *
4307 * uint32_t crc;
4308 * val = crc_table[(val ^ crc) & 0xFF];
4309 * crc = val ^ (crc >> 8);
4310 *
4311 */
4312 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4313 eor(val, val, crc);
4314 andr(val, val, 0xff);
4315 ldrw(val, Address(table, val, Address::lsl(2)));
4316 eor(crc, val, crc, Assembler::LSR, 8);
4317 }
4318
4319 /**
4320 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
4321 *
4322 * @param [in,out]crc Register containing the crc.
4323 * @param [in]v Register containing the 32-bit to fold into the CRC.
4324 * @param [in]table0 Register containing table 0 of crc constants.
4325 * @param [in]table1 Register containing table 1 of crc constants.
4326 * @param [in]table2 Register containing table 2 of crc constants.
4327 * @param [in]table3 Register containing table 3 of crc constants.
4328 *
4329 * uint32_t crc;
4330 * v = crc ^ v
4331 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
4332 *
4333 */
4334 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
4335 Register table0, Register table1, Register table2, Register table3,
4336 bool upper) {
4337 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
4338 uxtb(tmp, v);
4339 ldrw(crc, Address(table3, tmp, Address::lsl(2)));
4340 ubfx(tmp, v, 8, 8);
4341 ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
4342 eor(crc, crc, tmp);
4343 ubfx(tmp, v, 16, 8);
4344 ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
4345 eor(crc, crc, tmp);
4346 ubfx(tmp, v, 24, 8);
4347 ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
4348 eor(crc, crc, tmp);
4349 }
4350
4351 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
4352 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4353 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4354 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4355
4356 subs(tmp0, len, 384);
4357 mvnw(crc, crc);
4358 br(Assembler::GE, CRC_by128_pre);
4359 BIND(CRC_less128);
4360 subs(len, len, 32);
4361 br(Assembler::GE, CRC_by32_loop);
4362 BIND(CRC_less32);
4363 adds(len, len, 32 - 4);
4364 br(Assembler::GE, CRC_by4_loop);
4365 adds(len, len, 4);
4366 br(Assembler::GT, CRC_by1_loop);
4367 b(L_exit);
4368
4369 BIND(CRC_by32_loop);
4370 ldp(tmp0, tmp1, Address(buf));
4371 crc32x(crc, crc, tmp0);
4372 ldp(tmp2, tmp3, Address(buf, 16));
4373 crc32x(crc, crc, tmp1);
4374 add(buf, buf, 32);
4375 crc32x(crc, crc, tmp2);
4376 subs(len, len, 32);
4377 crc32x(crc, crc, tmp3);
4378 br(Assembler::GE, CRC_by32_loop);
4379 cmn(len, (u1)32);
4380 br(Assembler::NE, CRC_less32);
4381 b(L_exit);
4382
4383 BIND(CRC_by4_loop);
4384 ldrw(tmp0, Address(post(buf, 4)));
4385 subs(len, len, 4);
4386 crc32w(crc, crc, tmp0);
4387 br(Assembler::GE, CRC_by4_loop);
4388 adds(len, len, 4);
4389 br(Assembler::LE, L_exit);
4390 BIND(CRC_by1_loop);
4391 ldrb(tmp0, Address(post(buf, 1)));
4392 subs(len, len, 1);
4393 crc32b(crc, crc, tmp0);
4394 br(Assembler::GT, CRC_by1_loop);
4395 b(L_exit);
4396
4397 BIND(CRC_by128_pre);
4398 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4399 4*256*sizeof(juint) + 8*sizeof(juint));
4400 mov(crc, 0);
4401 crc32x(crc, crc, tmp0);
4402 crc32x(crc, crc, tmp1);
4403
4404 cbnz(len, CRC_less128);
4405
4406 BIND(L_exit);
4407 mvnw(crc, crc);
4408 }
4409
4410 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
4411 Register len, Register tmp0, Register tmp1, Register tmp2,
4412 Register tmp3) {
4413 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4414 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4415
4416 mvnw(crc, crc);
4417
4418 subs(len, len, 128);
4419 br(Assembler::GE, CRC_by64_pre);
4420 BIND(CRC_less64);
4421 adds(len, len, 128-32);
4422 br(Assembler::GE, CRC_by32_loop);
4423 BIND(CRC_less32);
4424 adds(len, len, 32-4);
4425 br(Assembler::GE, CRC_by4_loop);
4426 adds(len, len, 4);
4427 br(Assembler::GT, CRC_by1_loop);
4428 b(L_exit);
4429
4430 BIND(CRC_by32_loop);
4431 ldp(tmp0, tmp1, Address(post(buf, 16)));
4432 subs(len, len, 32);
4433 crc32x(crc, crc, tmp0);
4434 ldr(tmp2, Address(post(buf, 8)));
4435 crc32x(crc, crc, tmp1);
4436 ldr(tmp3, Address(post(buf, 8)));
4437 crc32x(crc, crc, tmp2);
4438 crc32x(crc, crc, tmp3);
4439 br(Assembler::GE, CRC_by32_loop);
4440 cmn(len, (u1)32);
4441 br(Assembler::NE, CRC_less32);
4442 b(L_exit);
4443
4444 BIND(CRC_by4_loop);
4445 ldrw(tmp0, Address(post(buf, 4)));
4446 subs(len, len, 4);
4447 crc32w(crc, crc, tmp0);
4448 br(Assembler::GE, CRC_by4_loop);
4449 adds(len, len, 4);
4450 br(Assembler::LE, L_exit);
4451 BIND(CRC_by1_loop);
4452 ldrb(tmp0, Address(post(buf, 1)));
4453 subs(len, len, 1);
4454 crc32b(crc, crc, tmp0);
4455 br(Assembler::GT, CRC_by1_loop);
4456 b(L_exit);
4457
4458 BIND(CRC_by64_pre);
4459 sub(buf, buf, 8);
4460 ldp(tmp0, tmp1, Address(buf, 8));
4461 crc32x(crc, crc, tmp0);
4462 ldr(tmp2, Address(buf, 24));
4463 crc32x(crc, crc, tmp1);
4464 ldr(tmp3, Address(buf, 32));
4465 crc32x(crc, crc, tmp2);
4466 ldr(tmp0, Address(buf, 40));
4467 crc32x(crc, crc, tmp3);
4468 ldr(tmp1, Address(buf, 48));
4469 crc32x(crc, crc, tmp0);
4470 ldr(tmp2, Address(buf, 56));
4471 crc32x(crc, crc, tmp1);
4472 ldr(tmp3, Address(pre(buf, 64)));
4473
4474 b(CRC_by64_loop);
4475
4476 align(CodeEntryAlignment);
4477 BIND(CRC_by64_loop);
4478 subs(len, len, 64);
4479 crc32x(crc, crc, tmp2);
4480 ldr(tmp0, Address(buf, 8));
4481 crc32x(crc, crc, tmp3);
4482 ldr(tmp1, Address(buf, 16));
4483 crc32x(crc, crc, tmp0);
4484 ldr(tmp2, Address(buf, 24));
4485 crc32x(crc, crc, tmp1);
4486 ldr(tmp3, Address(buf, 32));
4487 crc32x(crc, crc, tmp2);
4488 ldr(tmp0, Address(buf, 40));
4489 crc32x(crc, crc, tmp3);
4490 ldr(tmp1, Address(buf, 48));
4491 crc32x(crc, crc, tmp0);
4492 ldr(tmp2, Address(buf, 56));
4493 crc32x(crc, crc, tmp1);
4494 ldr(tmp3, Address(pre(buf, 64)));
4495 br(Assembler::GE, CRC_by64_loop);
4496
4497 // post-loop
4498 crc32x(crc, crc, tmp2);
4499 crc32x(crc, crc, tmp3);
4500
4501 sub(len, len, 64);
4502 add(buf, buf, 8);
4503 cmn(len, (u1)128);
4504 br(Assembler::NE, CRC_less64);
4505 BIND(L_exit);
4506 mvnw(crc, crc);
4507 }
4508
4509 /**
4510 * @param crc register containing existing CRC (32-bit)
4511 * @param buf register pointing to input byte buffer (byte*)
4512 * @param len register containing number of bytes
4513 * @param table register that will contain address of CRC table
4514 * @param tmp scratch register
4515 */
4516 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
4517 Register table0, Register table1, Register table2, Register table3,
4518 Register tmp, Register tmp2, Register tmp3) {
4519 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
4520
4521 if (UseCryptoPmullForCRC32) {
4522 kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4523 return;
4524 }
4525
4526 if (UseCRC32) {
4527 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
4528 return;
4529 }
4530
4531 mvnw(crc, crc);
4532
4533 {
4534 uint64_t offset;
4535 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4536 add(table0, table0, offset);
4537 }
4538 add(table1, table0, 1*256*sizeof(juint));
4539 add(table2, table0, 2*256*sizeof(juint));
4540 add(table3, table0, 3*256*sizeof(juint));
4541
4542 { // Neon code start
4543 cmp(len, (u1)64);
4544 br(Assembler::LT, L_by16);
4545 eor(v16, T16B, v16, v16);
4546
4547 Label L_fold;
4548
4549 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
4550
4551 ld1(v0, v1, T2D, post(buf, 32));
4552 ld1r(v4, T2D, post(tmp, 8));
4553 ld1r(v5, T2D, post(tmp, 8));
4554 ld1r(v6, T2D, post(tmp, 8));
4555 ld1r(v7, T2D, post(tmp, 8));
4556 mov(v16, S, 0, crc);
4557
4558 eor(v0, T16B, v0, v16);
4559 sub(len, len, 64);
4560
4561 BIND(L_fold);
4562 pmull(v22, T8H, v0, v5, T8B);
4563 pmull(v20, T8H, v0, v7, T8B);
4564 pmull(v23, T8H, v0, v4, T8B);
4565 pmull(v21, T8H, v0, v6, T8B);
4566
4567 pmull2(v18, T8H, v0, v5, T16B);
4568 pmull2(v16, T8H, v0, v7, T16B);
4569 pmull2(v19, T8H, v0, v4, T16B);
4570 pmull2(v17, T8H, v0, v6, T16B);
4571
4572 uzp1(v24, T8H, v20, v22);
4573 uzp2(v25, T8H, v20, v22);
4574 eor(v20, T16B, v24, v25);
4575
4576 uzp1(v26, T8H, v16, v18);
4577 uzp2(v27, T8H, v16, v18);
4578 eor(v16, T16B, v26, v27);
4579
4580 ushll2(v22, T4S, v20, T8H, 8);
4581 ushll(v20, T4S, v20, T4H, 8);
4582
4583 ushll2(v18, T4S, v16, T8H, 8);
4584 ushll(v16, T4S, v16, T4H, 8);
4585
4586 eor(v22, T16B, v23, v22);
4587 eor(v18, T16B, v19, v18);
4588 eor(v20, T16B, v21, v20);
4589 eor(v16, T16B, v17, v16);
4590
4591 uzp1(v17, T2D, v16, v20);
4592 uzp2(v21, T2D, v16, v20);
4593 eor(v17, T16B, v17, v21);
4594
4595 ushll2(v20, T2D, v17, T4S, 16);
4596 ushll(v16, T2D, v17, T2S, 16);
4597
4598 eor(v20, T16B, v20, v22);
4599 eor(v16, T16B, v16, v18);
4600
4601 uzp1(v17, T2D, v20, v16);
4602 uzp2(v21, T2D, v20, v16);
4603 eor(v28, T16B, v17, v21);
4604
4605 pmull(v22, T8H, v1, v5, T8B);
4606 pmull(v20, T8H, v1, v7, T8B);
4607 pmull(v23, T8H, v1, v4, T8B);
4608 pmull(v21, T8H, v1, v6, T8B);
4609
4610 pmull2(v18, T8H, v1, v5, T16B);
4611 pmull2(v16, T8H, v1, v7, T16B);
4612 pmull2(v19, T8H, v1, v4, T16B);
4613 pmull2(v17, T8H, v1, v6, T16B);
4614
4615 ld1(v0, v1, T2D, post(buf, 32));
4616
4617 uzp1(v24, T8H, v20, v22);
4618 uzp2(v25, T8H, v20, v22);
4619 eor(v20, T16B, v24, v25);
4620
4621 uzp1(v26, T8H, v16, v18);
4622 uzp2(v27, T8H, v16, v18);
4623 eor(v16, T16B, v26, v27);
4624
4625 ushll2(v22, T4S, v20, T8H, 8);
4626 ushll(v20, T4S, v20, T4H, 8);
4627
4628 ushll2(v18, T4S, v16, T8H, 8);
4629 ushll(v16, T4S, v16, T4H, 8);
4630
4631 eor(v22, T16B, v23, v22);
4632 eor(v18, T16B, v19, v18);
4633 eor(v20, T16B, v21, v20);
4634 eor(v16, T16B, v17, v16);
4635
4636 uzp1(v17, T2D, v16, v20);
4637 uzp2(v21, T2D, v16, v20);
4638 eor(v16, T16B, v17, v21);
4639
4640 ushll2(v20, T2D, v16, T4S, 16);
4641 ushll(v16, T2D, v16, T2S, 16);
4642
4643 eor(v20, T16B, v22, v20);
4644 eor(v16, T16B, v16, v18);
4645
4646 uzp1(v17, T2D, v20, v16);
4647 uzp2(v21, T2D, v20, v16);
4648 eor(v20, T16B, v17, v21);
4649
4650 shl(v16, T2D, v28, 1);
4651 shl(v17, T2D, v20, 1);
4652
4653 eor(v0, T16B, v0, v16);
4654 eor(v1, T16B, v1, v17);
4655
4656 subs(len, len, 32);
4657 br(Assembler::GE, L_fold);
4658
4659 mov(crc, 0);
4660 mov(tmp, v0, D, 0);
4661 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4662 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4663 mov(tmp, v0, D, 1);
4664 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4665 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4666 mov(tmp, v1, D, 0);
4667 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4668 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4669 mov(tmp, v1, D, 1);
4670 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4671 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4672
4673 add(len, len, 32);
4674 } // Neon code end
4675
4676 BIND(L_by16);
4677 subs(len, len, 16);
4678 br(Assembler::GE, L_by16_loop);
4679 adds(len, len, 16-4);
4680 br(Assembler::GE, L_by4_loop);
4681 adds(len, len, 4);
4682 br(Assembler::GT, L_by1_loop);
4683 b(L_exit);
4684
4685 BIND(L_by4_loop);
4686 ldrw(tmp, Address(post(buf, 4)));
4687 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
4688 subs(len, len, 4);
4689 br(Assembler::GE, L_by4_loop);
4690 adds(len, len, 4);
4691 br(Assembler::LE, L_exit);
4692 BIND(L_by1_loop);
4693 subs(len, len, 1);
4694 ldrb(tmp, Address(post(buf, 1)));
4695 update_byte_crc32(crc, tmp, table0);
4696 br(Assembler::GT, L_by1_loop);
4697 b(L_exit);
4698
4699 align(CodeEntryAlignment);
4700 BIND(L_by16_loop);
4701 subs(len, len, 16);
4702 ldp(tmp, tmp3, Address(post(buf, 16)));
4703 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4704 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4705 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
4706 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
4707 br(Assembler::GE, L_by16_loop);
4708 adds(len, len, 16-4);
4709 br(Assembler::GE, L_by4_loop);
4710 adds(len, len, 4);
4711 br(Assembler::GT, L_by1_loop);
4712 BIND(L_exit);
4713 mvnw(crc, crc);
4714 }
4715
4716 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
4717 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4718 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4719 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4720
4721 subs(tmp0, len, 384);
4722 br(Assembler::GE, CRC_by128_pre);
4723 BIND(CRC_less128);
4724 subs(len, len, 32);
4725 br(Assembler::GE, CRC_by32_loop);
4726 BIND(CRC_less32);
4727 adds(len, len, 32 - 4);
4728 br(Assembler::GE, CRC_by4_loop);
4729 adds(len, len, 4);
4730 br(Assembler::GT, CRC_by1_loop);
4731 b(L_exit);
4732
4733 BIND(CRC_by32_loop);
4734 ldp(tmp0, tmp1, Address(buf));
4735 crc32cx(crc, crc, tmp0);
4736 ldr(tmp2, Address(buf, 16));
4737 crc32cx(crc, crc, tmp1);
4738 ldr(tmp3, Address(buf, 24));
4739 crc32cx(crc, crc, tmp2);
4740 add(buf, buf, 32);
4741 subs(len, len, 32);
4742 crc32cx(crc, crc, tmp3);
4743 br(Assembler::GE, CRC_by32_loop);
4744 cmn(len, (u1)32);
4745 br(Assembler::NE, CRC_less32);
4746 b(L_exit);
4747
4748 BIND(CRC_by4_loop);
4749 ldrw(tmp0, Address(post(buf, 4)));
4750 subs(len, len, 4);
4751 crc32cw(crc, crc, tmp0);
4752 br(Assembler::GE, CRC_by4_loop);
4753 adds(len, len, 4);
4754 br(Assembler::LE, L_exit);
4755 BIND(CRC_by1_loop);
4756 ldrb(tmp0, Address(post(buf, 1)));
4757 subs(len, len, 1);
4758 crc32cb(crc, crc, tmp0);
4759 br(Assembler::GT, CRC_by1_loop);
4760 b(L_exit);
4761
4762 BIND(CRC_by128_pre);
4763 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4764 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
4765 mov(crc, 0);
4766 crc32cx(crc, crc, tmp0);
4767 crc32cx(crc, crc, tmp1);
4768
4769 cbnz(len, CRC_less128);
4770
4771 BIND(L_exit);
4772 }
4773
4774 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
4775 Register len, Register tmp0, Register tmp1, Register tmp2,
4776 Register tmp3) {
4777 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4778 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4779
4780 subs(len, len, 128);
4781 br(Assembler::GE, CRC_by64_pre);
4782 BIND(CRC_less64);
4783 adds(len, len, 128-32);
4784 br(Assembler::GE, CRC_by32_loop);
4785 BIND(CRC_less32);
4786 adds(len, len, 32-4);
4787 br(Assembler::GE, CRC_by4_loop);
4788 adds(len, len, 4);
4789 br(Assembler::GT, CRC_by1_loop);
4790 b(L_exit);
4791
4792 BIND(CRC_by32_loop);
4793 ldp(tmp0, tmp1, Address(post(buf, 16)));
4794 subs(len, len, 32);
4795 crc32cx(crc, crc, tmp0);
4796 ldr(tmp2, Address(post(buf, 8)));
4797 crc32cx(crc, crc, tmp1);
4798 ldr(tmp3, Address(post(buf, 8)));
4799 crc32cx(crc, crc, tmp2);
4800 crc32cx(crc, crc, tmp3);
4801 br(Assembler::GE, CRC_by32_loop);
4802 cmn(len, (u1)32);
4803 br(Assembler::NE, CRC_less32);
4804 b(L_exit);
4805
4806 BIND(CRC_by4_loop);
4807 ldrw(tmp0, Address(post(buf, 4)));
4808 subs(len, len, 4);
4809 crc32cw(crc, crc, tmp0);
4810 br(Assembler::GE, CRC_by4_loop);
4811 adds(len, len, 4);
4812 br(Assembler::LE, L_exit);
4813 BIND(CRC_by1_loop);
4814 ldrb(tmp0, Address(post(buf, 1)));
4815 subs(len, len, 1);
4816 crc32cb(crc, crc, tmp0);
4817 br(Assembler::GT, CRC_by1_loop);
4818 b(L_exit);
4819
4820 BIND(CRC_by64_pre);
4821 sub(buf, buf, 8);
4822 ldp(tmp0, tmp1, Address(buf, 8));
4823 crc32cx(crc, crc, tmp0);
4824 ldr(tmp2, Address(buf, 24));
4825 crc32cx(crc, crc, tmp1);
4826 ldr(tmp3, Address(buf, 32));
4827 crc32cx(crc, crc, tmp2);
4828 ldr(tmp0, Address(buf, 40));
4829 crc32cx(crc, crc, tmp3);
4830 ldr(tmp1, Address(buf, 48));
4831 crc32cx(crc, crc, tmp0);
4832 ldr(tmp2, Address(buf, 56));
4833 crc32cx(crc, crc, tmp1);
4834 ldr(tmp3, Address(pre(buf, 64)));
4835
4836 b(CRC_by64_loop);
4837
4838 align(CodeEntryAlignment);
4839 BIND(CRC_by64_loop);
4840 subs(len, len, 64);
4841 crc32cx(crc, crc, tmp2);
4842 ldr(tmp0, Address(buf, 8));
4843 crc32cx(crc, crc, tmp3);
4844 ldr(tmp1, Address(buf, 16));
4845 crc32cx(crc, crc, tmp0);
4846 ldr(tmp2, Address(buf, 24));
4847 crc32cx(crc, crc, tmp1);
4848 ldr(tmp3, Address(buf, 32));
4849 crc32cx(crc, crc, tmp2);
4850 ldr(tmp0, Address(buf, 40));
4851 crc32cx(crc, crc, tmp3);
4852 ldr(tmp1, Address(buf, 48));
4853 crc32cx(crc, crc, tmp0);
4854 ldr(tmp2, Address(buf, 56));
4855 crc32cx(crc, crc, tmp1);
4856 ldr(tmp3, Address(pre(buf, 64)));
4857 br(Assembler::GE, CRC_by64_loop);
4858
4859 // post-loop
4860 crc32cx(crc, crc, tmp2);
4861 crc32cx(crc, crc, tmp3);
4862
4863 sub(len, len, 64);
4864 add(buf, buf, 8);
4865 cmn(len, (u1)128);
4866 br(Assembler::NE, CRC_less64);
4867 BIND(L_exit);
4868 }
4869
4870 /**
4871 * @param crc register containing existing CRC (32-bit)
4872 * @param buf register pointing to input byte buffer (byte*)
4873 * @param len register containing number of bytes
4874 * @param table register that will contain address of CRC table
4875 * @param tmp scratch register
4876 */
4877 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
4878 Register table0, Register table1, Register table2, Register table3,
4879 Register tmp, Register tmp2, Register tmp3) {
4880 if (UseCryptoPmullForCRC32) {
4881 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4882 } else {
4883 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
4884 }
4885 }
4886
4887 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
4888 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
4889 Label CRC_by128_loop;
4890 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4891
4892 sub(len, len, 256);
4893 Register table = tmp0;
4894 {
4895 uint64_t offset;
4896 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4897 add(table, table, offset);
4898 }
4899 add(table, table, table_offset);
4900
4901 // Registers v0..v7 are used as data registers.
4902 // Registers v16..v31 are used as tmp registers.
4903 sub(buf, buf, 0x10);
4904 ldrq(v0, Address(buf, 0x10));
4905 ldrq(v1, Address(buf, 0x20));
4906 ldrq(v2, Address(buf, 0x30));
4907 ldrq(v3, Address(buf, 0x40));
4908 ldrq(v4, Address(buf, 0x50));
4909 ldrq(v5, Address(buf, 0x60));
4910 ldrq(v6, Address(buf, 0x70));
4911 ldrq(v7, Address(pre(buf, 0x80)));
4912
4913 movi(v31, T4S, 0);
4914 mov(v31, S, 0, crc);
4915 eor(v0, T16B, v0, v31);
4916
4917 // Register v16 contains constants from the crc table.
4918 ldrq(v16, Address(table));
4919 b(CRC_by128_loop);
4920
4921 align(OptoLoopAlignment);
4922 BIND(CRC_by128_loop);
4923 pmull (v17, T1Q, v0, v16, T1D);
4924 pmull2(v18, T1Q, v0, v16, T2D);
4925 ldrq(v0, Address(buf, 0x10));
4926 eor3(v0, T16B, v17, v18, v0);
4927
4928 pmull (v19, T1Q, v1, v16, T1D);
4929 pmull2(v20, T1Q, v1, v16, T2D);
4930 ldrq(v1, Address(buf, 0x20));
4931 eor3(v1, T16B, v19, v20, v1);
4932
4933 pmull (v21, T1Q, v2, v16, T1D);
4934 pmull2(v22, T1Q, v2, v16, T2D);
4935 ldrq(v2, Address(buf, 0x30));
4936 eor3(v2, T16B, v21, v22, v2);
4937
4938 pmull (v23, T1Q, v3, v16, T1D);
4939 pmull2(v24, T1Q, v3, v16, T2D);
4940 ldrq(v3, Address(buf, 0x40));
4941 eor3(v3, T16B, v23, v24, v3);
4942
4943 pmull (v25, T1Q, v4, v16, T1D);
4944 pmull2(v26, T1Q, v4, v16, T2D);
4945 ldrq(v4, Address(buf, 0x50));
4946 eor3(v4, T16B, v25, v26, v4);
4947
4948 pmull (v27, T1Q, v5, v16, T1D);
4949 pmull2(v28, T1Q, v5, v16, T2D);
4950 ldrq(v5, Address(buf, 0x60));
4951 eor3(v5, T16B, v27, v28, v5);
4952
4953 pmull (v29, T1Q, v6, v16, T1D);
4954 pmull2(v30, T1Q, v6, v16, T2D);
4955 ldrq(v6, Address(buf, 0x70));
4956 eor3(v6, T16B, v29, v30, v6);
4957
4958 // Reuse registers v23, v24.
4959 // Using them won't block the first instruction of the next iteration.
4960 pmull (v23, T1Q, v7, v16, T1D);
4961 pmull2(v24, T1Q, v7, v16, T2D);
4962 ldrq(v7, Address(pre(buf, 0x80)));
4963 eor3(v7, T16B, v23, v24, v7);
4964
4965 subs(len, len, 0x80);
4966 br(Assembler::GE, CRC_by128_loop);
4967
4968 // fold into 512 bits
4969 // Use v31 for constants because v16 can be still in use.
4970 ldrq(v31, Address(table, 0x10));
4971
4972 pmull (v17, T1Q, v0, v31, T1D);
4973 pmull2(v18, T1Q, v0, v31, T2D);
4974 eor3(v0, T16B, v17, v18, v4);
4975
4976 pmull (v19, T1Q, v1, v31, T1D);
4977 pmull2(v20, T1Q, v1, v31, T2D);
4978 eor3(v1, T16B, v19, v20, v5);
4979
4980 pmull (v21, T1Q, v2, v31, T1D);
4981 pmull2(v22, T1Q, v2, v31, T2D);
4982 eor3(v2, T16B, v21, v22, v6);
4983
4984 pmull (v23, T1Q, v3, v31, T1D);
4985 pmull2(v24, T1Q, v3, v31, T2D);
4986 eor3(v3, T16B, v23, v24, v7);
4987
4988 // fold into 128 bits
4989 // Use v17 for constants because v31 can be still in use.
4990 ldrq(v17, Address(table, 0x20));
4991 pmull (v25, T1Q, v0, v17, T1D);
4992 pmull2(v26, T1Q, v0, v17, T2D);
4993 eor3(v3, T16B, v3, v25, v26);
4994
4995 // Use v18 for constants because v17 can be still in use.
4996 ldrq(v18, Address(table, 0x30));
4997 pmull (v27, T1Q, v1, v18, T1D);
4998 pmull2(v28, T1Q, v1, v18, T2D);
4999 eor3(v3, T16B, v3, v27, v28);
5000
5001 // Use v19 for constants because v18 can be still in use.
5002 ldrq(v19, Address(table, 0x40));
5003 pmull (v29, T1Q, v2, v19, T1D);
5004 pmull2(v30, T1Q, v2, v19, T2D);
5005 eor3(v0, T16B, v3, v29, v30);
5006
5007 add(len, len, 0x80);
5008 add(buf, buf, 0x10);
5009
5010 mov(tmp0, v0, D, 0);
5011 mov(tmp1, v0, D, 1);
5012 }
5013
5014 void MacroAssembler::addptr(const Address &dst, int32_t src) {
5015 Address adr;
5016 switch(dst.getMode()) {
5017 case Address::base_plus_offset:
5018 // This is the expected mode, although we allow all the other
5019 // forms below.
5020 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
5021 break;
5022 default:
5023 lea(rscratch2, dst);
5024 adr = Address(rscratch2);
5025 break;
5026 }
5027 ldr(rscratch1, adr);
5028 add(rscratch1, rscratch1, src);
5029 str(rscratch1, adr);
5030 }
5031
5032 void MacroAssembler::cmpptr(Register src1, Address src2) {
5033 uint64_t offset;
5034 adrp(rscratch1, src2, offset);
5035 ldr(rscratch1, Address(rscratch1, offset));
5036 cmp(src1, rscratch1);
5037 }
5038
5039 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
5040 cmp(obj1, obj2);
5041 }
5042
5043 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5044 load_method_holder(rresult, rmethod);
5045 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5046 }
5047
5048 void MacroAssembler::load_method_holder(Register holder, Register method) {
5049 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
5050 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5051 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5052 }
5053
5054 // Loads the obj's Klass* into dst.
5055 // Preserves all registers (incl src, rscratch1 and rscratch2).
5056 // Input:
5057 // src - the oop we want to load the klass from.
5058 // dst - output narrow klass.
5059 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5060 assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
5061 ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5062 lsr(dst, dst, markWord::klass_shift);
5063 }
5064
5065 void MacroAssembler::load_klass(Register dst, Register src) {
5066 if (UseCompactObjectHeaders) {
5067 load_narrow_klass_compact(dst, src);
5068 } else {
5069 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5070 }
5071 decode_klass_not_null(dst);
5072 }
5073
5074 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
5075 if (RestoreMXCSROnJNICalls) {
5076 Label OK;
5077 get_fpcr(tmp1);
5078 mov(tmp2, tmp1);
5079 // Set FPCR to the state we need. We do want Round to Nearest. We
5080 // don't want non-IEEE rounding modes or floating-point traps.
5081 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
5082 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12)
5083 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ
5084 eor(tmp2, tmp1, tmp2);
5085 cbz(tmp2, OK); // Only reset FPCR if it's wrong
5086 set_fpcr(tmp1);
5087 bind(OK);
5088 }
5089 }
5090
5091 // ((OopHandle)result).resolve();
5092 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
5093 // OopHandle::resolve is an indirection.
5094 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
5095 }
5096
5097 // ((WeakHandle)result).resolve();
5098 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
5099 assert_different_registers(result, tmp1, tmp2);
5100 Label resolved;
5101
5102 // A null weak handle resolves to null.
5103 cbz(result, resolved);
5104
5105 // Only 64 bit platforms support GCs that require a tmp register
5106 // WeakHandle::resolve is an indirection like jweak.
5107 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5108 result, Address(result), tmp1, tmp2);
5109 bind(resolved);
5110 }
5111
5112 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
5113 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5114 ldr(dst, Address(rmethod, Method::const_offset()));
5115 ldr(dst, Address(dst, ConstMethod::constants_offset()));
5116 ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
5117 ldr(dst, Address(dst, mirror_offset));
5118 resolve_oop_handle(dst, tmp1, tmp2);
5119 }
5120
5121 void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
5122 assert_different_registers(obj, klass, tmp);
5123 if (UseCompactObjectHeaders) {
5124 load_narrow_klass_compact(tmp, obj);
5125 } else {
5126 ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
5127 }
5128 if (CompressedKlassPointers::base() == nullptr) {
5129 cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
5130 return;
5131 } else if (!AOTCodeCache::is_on_for_dump() &&
5132 ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
5133 && CompressedKlassPointers::shift() == 0) {
5134 // Only the bottom 32 bits matter
5135 cmpw(klass, tmp);
5136 return;
5137 }
5138 decode_klass_not_null(tmp);
5139 cmp(klass, tmp);
5140 }
5141
5142 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5143 if (UseCompactObjectHeaders) {
5144 load_narrow_klass_compact(tmp1, obj1);
5145 load_narrow_klass_compact(tmp2, obj2);
5146 } else {
5147 ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5148 ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5149 }
5150 cmpw(tmp1, tmp2);
5151 }
5152
5153 void MacroAssembler::store_klass(Register dst, Register src) {
5154 // FIXME: Should this be a store release? concurrent gcs assumes
5155 // klass length is valid if klass field is not null.
5156 assert(!UseCompactObjectHeaders, "not with compact headers");
5157 encode_klass_not_null(src);
5158 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5159 }
5160
5161 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5162 assert(!UseCompactObjectHeaders, "not with compact headers");
5163 // Store to klass gap in destination
5164 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
5165 }
5166
5167 // Algorithm must match CompressedOops::encode.
5168 void MacroAssembler::encode_heap_oop(Register d, Register s) {
5169 #ifdef ASSERT
5170 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5171 #endif
5172 verify_oop_msg(s, "broken oop in encode_heap_oop");
5173 if (CompressedOops::base() == nullptr) {
5174 if (CompressedOops::shift() != 0) {
5175 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5176 lsr(d, s, LogMinObjAlignmentInBytes);
5177 } else {
5178 mov(d, s);
5179 }
5180 } else {
5181 subs(d, s, rheapbase);
5182 csel(d, d, zr, Assembler::HS);
5183 lsr(d, d, LogMinObjAlignmentInBytes);
5184
5185 /* Old algorithm: is this any worse?
5186 Label nonnull;
5187 cbnz(r, nonnull);
5188 sub(r, r, rheapbase);
5189 bind(nonnull);
5190 lsr(r, r, LogMinObjAlignmentInBytes);
5191 */
5192 }
5193 }
5194
5195 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5196 #ifdef ASSERT
5197 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5198 if (CheckCompressedOops) {
5199 Label ok;
5200 cbnz(r, ok);
5201 stop("null oop passed to encode_heap_oop_not_null");
5202 bind(ok);
5203 }
5204 #endif
5205 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5206 if (CompressedOops::base() != nullptr) {
5207 sub(r, r, rheapbase);
5208 }
5209 if (CompressedOops::shift() != 0) {
5210 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5211 lsr(r, r, LogMinObjAlignmentInBytes);
5212 }
5213 }
5214
5215 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5216 #ifdef ASSERT
5217 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5218 if (CheckCompressedOops) {
5219 Label ok;
5220 cbnz(src, ok);
5221 stop("null oop passed to encode_heap_oop_not_null2");
5222 bind(ok);
5223 }
5224 #endif
5225 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5226
5227 Register data = src;
5228 if (CompressedOops::base() != nullptr) {
5229 sub(dst, src, rheapbase);
5230 data = dst;
5231 }
5232 if (CompressedOops::shift() != 0) {
5233 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5234 lsr(dst, data, LogMinObjAlignmentInBytes);
5235 data = dst;
5236 }
5237 if (data == src)
5238 mov(dst, src);
5239 }
5240
5241 void MacroAssembler::decode_heap_oop(Register d, Register s) {
5242 #ifdef ASSERT
5243 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5244 #endif
5245 if (CompressedOops::base() == nullptr) {
5246 if (CompressedOops::shift() != 0) {
5247 lsl(d, s, CompressedOops::shift());
5248 } else if (d != s) {
5249 mov(d, s);
5250 }
5251 } else {
5252 Label done;
5253 if (d != s)
5254 mov(d, s);
5255 cbz(s, done);
5256 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
5257 bind(done);
5258 }
5259 verify_oop_msg(d, "broken oop in decode_heap_oop");
5260 }
5261
5262 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5263 assert (UseCompressedOops, "should only be used for compressed headers");
5264 assert (Universe::heap() != nullptr, "java heap should be initialized");
5265 // Cannot assert, unverified entry point counts instructions (see .ad file)
5266 // vtableStubs also counts instructions in pd_code_size_limit.
5267 // Also do not verify_oop as this is called by verify_oop.
5268 if (CompressedOops::shift() != 0) {
5269 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5270 if (CompressedOops::base() != nullptr) {
5271 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5272 } else {
5273 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5274 }
5275 } else {
5276 assert (CompressedOops::base() == nullptr, "sanity");
5277 }
5278 }
5279
5280 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5281 assert (UseCompressedOops, "should only be used for compressed headers");
5282 assert (Universe::heap() != nullptr, "java heap should be initialized");
5283 // Cannot assert, unverified entry point counts instructions (see .ad file)
5284 // vtableStubs also counts instructions in pd_code_size_limit.
5285 // Also do not verify_oop as this is called by verify_oop.
5286 if (CompressedOops::shift() != 0) {
5287 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5288 if (CompressedOops::base() != nullptr) {
5289 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5290 } else {
5291 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5292 }
5293 } else {
5294 assert (CompressedOops::base() == nullptr, "sanity");
5295 if (dst != src) {
5296 mov(dst, src);
5297 }
5298 }
5299 }
5300
5301 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
5302
5303 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
5304 assert(Metaspace::initialized(), "metaspace not initialized yet");
5305 assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
5306 return _klass_decode_mode;
5307 }
5308
5309 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
5310 // KlassDecodeMode shouldn't be set already.
5311 assert(_klass_decode_mode == KlassDecodeNone, "set once");
5312
5313 if (base == nullptr) {
5314 return KlassDecodeZero;
5315 }
5316
5317 if (operand_valid_for_logical_immediate(
5318 /*is32*/false, (uint64_t)base)) {
5319 const uint64_t range_mask = right_n_bits(log2i_ceil(range));
5320 if (((uint64_t)base & range_mask) == 0) {
5321 return KlassDecodeXor;
5322 }
5323 }
5324
5325 const uint64_t shifted_base =
5326 (uint64_t)base >> shift;
5327 if ((shifted_base & 0xffff0000ffffffff) == 0) {
5328 return KlassDecodeMovk;
5329 }
5330
5331 // No valid encoding.
5332 return KlassDecodeNone;
5333 }
5334
5335 // Check if one of the above decoding modes will work for given base, shift and range.
5336 bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
5337 return klass_decode_mode(base, shift, range) != KlassDecodeNone;
5338 }
5339
5340 bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
5341 _klass_decode_mode = klass_decode_mode(base, shift, range);
5342 return _klass_decode_mode != KlassDecodeNone;
5343 }
5344
5345 static Register pick_different_tmp(Register dst, Register src) {
5346 auto tmps = RegSet::of(r0, r1, r2) - RegSet::of(src, dst);
5347 return *tmps.begin();
5348 }
5349
5350 void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) {
5351 // we have to load the klass base from the AOT constants area but
5352 // not the shift because it is not allowed to change
5353 int shift = CompressedKlassPointers::shift();
5354 assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
5355 if (dst != src) {
5356 // we can load the base into dst, subtract it formthe src and shift down
5357 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5358 ldr(dst, dst);
5359 sub(dst, src, dst);
5360 lsr(dst, dst, shift);
5361 } else {
5362 // we need an extra register in order to load the coop base
5363 Register tmp = pick_different_tmp(dst, src);
5364 RegSet regs = RegSet::of(tmp);
5365 push(regs, sp);
5366 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5367 ldr(tmp, tmp);
5368 sub(dst, src, tmp);
5369 lsr(dst, dst, shift);
5370 pop(regs, sp);
5371 }
5372 }
5373
5374 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5375 if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
5376 encode_klass_not_null_for_aot(dst, src);
5377 return;
5378 }
5379
5380 switch (klass_decode_mode()) {
5381 case KlassDecodeZero:
5382 if (CompressedKlassPointers::shift() != 0) {
5383 lsr(dst, src, CompressedKlassPointers::shift());
5384 } else {
5385 if (dst != src) mov(dst, src);
5386 }
5387 break;
5388
5389 case KlassDecodeXor:
5390 if (CompressedKlassPointers::shift() != 0) {
5391 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5392 lsr(dst, dst, CompressedKlassPointers::shift());
5393 } else {
5394 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5395 }
5396 break;
5397
5398 case KlassDecodeMovk:
5399 if (CompressedKlassPointers::shift() != 0) {
5400 ubfx(dst, src, CompressedKlassPointers::shift(), 32);
5401 } else {
5402 movw(dst, src);
5403 }
5404 break;
5405
5406 case KlassDecodeNone:
5407 ShouldNotReachHere();
5408 break;
5409 }
5410 }
5411
5412 void MacroAssembler::encode_klass_not_null(Register r) {
5413 encode_klass_not_null(r, r);
5414 }
5415
5416 void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) {
5417 // we have to load the klass base from the AOT constants area but
5418 // not the shift because it is not allowed to change
5419 int shift = CompressedKlassPointers::shift();
5420 assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
5421 if (dst != src) {
5422 // we can load the base into dst then add the offset with a suitable shift
5423 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5424 ldr(dst, dst);
5425 add(dst, dst, src, LSL, shift);
5426 } else {
5427 // we need an extra register in order to load the coop base
5428 Register tmp = pick_different_tmp(dst, src);
5429 RegSet regs = RegSet::of(tmp);
5430 push(regs, sp);
5431 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5432 ldr(tmp, tmp);
5433 add(dst, tmp, src, LSL, shift);
5434 pop(regs, sp);
5435 }
5436 }
5437
5438 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5439 if (AOTCodeCache::is_on_for_dump()) {
5440 decode_klass_not_null_for_aot(dst, src);
5441 return;
5442 }
5443
5444 switch (klass_decode_mode()) {
5445 case KlassDecodeZero:
5446 if (CompressedKlassPointers::shift() != 0) {
5447 lsl(dst, src, CompressedKlassPointers::shift());
5448 } else {
5449 if (dst != src) mov(dst, src);
5450 }
5451 break;
5452
5453 case KlassDecodeXor:
5454 if (CompressedKlassPointers::shift() != 0) {
5455 lsl(dst, src, CompressedKlassPointers::shift());
5456 eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5457 } else {
5458 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5459 }
5460 break;
5461
5462 case KlassDecodeMovk: {
5463 const uint64_t shifted_base =
5464 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
5465
5466 if (dst != src) movw(dst, src);
5467 movk(dst, shifted_base >> 32, 32);
5468
5469 if (CompressedKlassPointers::shift() != 0) {
5470 lsl(dst, dst, CompressedKlassPointers::shift());
5471 }
5472
5473 break;
5474 }
5475
5476 case KlassDecodeNone:
5477 ShouldNotReachHere();
5478 break;
5479 }
5480 }
5481
5482 void MacroAssembler::decode_klass_not_null(Register r) {
5483 decode_klass_not_null(r, r);
5484 }
5485
5486 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5487 #ifdef ASSERT
5488 {
5489 ThreadInVMfromUnknown tiv;
5490 assert (UseCompressedOops, "should only be used for compressed oops");
5491 assert (Universe::heap() != nullptr, "java heap should be initialized");
5492 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5493 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5494 }
5495 #endif
5496 int oop_index = oop_recorder()->find_index(obj);
5497 InstructionMark im(this);
5498 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5499 code_section()->relocate(inst_mark(), rspec);
5500 movz(dst, 0xDEAD, 16);
5501 movk(dst, 0xBEEF);
5502 }
5503
5504 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5505 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5506 int index = oop_recorder()->find_index(k);
5507
5508 InstructionMark im(this);
5509 RelocationHolder rspec = metadata_Relocation::spec(index);
5510 code_section()->relocate(inst_mark(), rspec);
5511 narrowKlass nk = CompressedKlassPointers::encode(k);
5512 movz(dst, (nk >> 16), 16);
5513 movk(dst, nk & 0xffff);
5514 }
5515
5516 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
5517 Register dst, Address src,
5518 Register tmp1, Register tmp2) {
5519 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5520 decorators = AccessInternal::decorator_fixup(decorators, type);
5521 bool as_raw = (decorators & AS_RAW) != 0;
5522 if (as_raw) {
5523 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
5524 } else {
5525 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
5526 }
5527 }
5528
5529 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
5530 Address dst, Register val,
5531 Register tmp1, Register tmp2, Register tmp3) {
5532 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5533 decorators = AccessInternal::decorator_fixup(decorators, type);
5534 bool as_raw = (decorators & AS_RAW) != 0;
5535 if (as_raw) {
5536 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5537 } else {
5538 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5539 }
5540 }
5541
5542 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5543 Register tmp2, DecoratorSet decorators) {
5544 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
5545 }
5546
5547 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5548 Register tmp2, DecoratorSet decorators) {
5549 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
5550 }
5551
5552 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5553 Register tmp2, Register tmp3, DecoratorSet decorators) {
5554 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5555 }
5556
5557 // Used for storing nulls.
5558 void MacroAssembler::store_heap_oop_null(Address dst) {
5559 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5560 }
5561
5562 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
5563 assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
5564 int index = oop_recorder()->allocate_metadata_index(obj);
5565 RelocationHolder rspec = metadata_Relocation::spec(index);
5566 return Address((address)obj, rspec);
5567 }
5568
5569 // Move an oop into a register.
5570 void MacroAssembler::movoop(Register dst, jobject obj) {
5571 int oop_index;
5572 if (obj == nullptr) {
5573 oop_index = oop_recorder()->allocate_oop_index(obj);
5574 } else {
5575 #ifdef ASSERT
5576 {
5577 ThreadInVMfromUnknown tiv;
5578 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5579 }
5580 #endif
5581 oop_index = oop_recorder()->find_index(obj);
5582 }
5583 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5584
5585 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
5586 mov(dst, Address((address)obj, rspec));
5587 } else {
5588 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
5589 ldr(dst, Address(dummy, rspec));
5590 }
5591 }
5592
5593 // Move a metadata address into a register.
5594 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
5595 int oop_index;
5596 if (obj == nullptr) {
5597 oop_index = oop_recorder()->allocate_metadata_index(obj);
5598 } else {
5599 oop_index = oop_recorder()->find_index(obj);
5600 }
5601 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
5602 mov(dst, Address((address)obj, rspec));
5603 }
5604
5605 Address MacroAssembler::constant_oop_address(jobject obj) {
5606 #ifdef ASSERT
5607 {
5608 ThreadInVMfromUnknown tiv;
5609 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5610 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
5611 }
5612 #endif
5613 int oop_index = oop_recorder()->find_index(obj);
5614 return Address((address)obj, oop_Relocation::spec(oop_index));
5615 }
5616
5617 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5618 void MacroAssembler::tlab_allocate(Register obj,
5619 Register var_size_in_bytes,
5620 int con_size_in_bytes,
5621 Register t1,
5622 Register t2,
5623 Label& slow_case) {
5624 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5625 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5626 }
5627
5628 void MacroAssembler::verify_tlab() {
5629 #ifdef ASSERT
5630 if (UseTLAB && VerifyOops) {
5631 Label next, ok;
5632
5633 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5634
5635 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5636 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5637 cmp(rscratch2, rscratch1);
5638 br(Assembler::HS, next);
5639 STOP("assert(top >= start)");
5640 should_not_reach_here();
5641
5642 bind(next);
5643 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5644 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5645 cmp(rscratch2, rscratch1);
5646 br(Assembler::HS, ok);
5647 STOP("assert(top <= end)");
5648 should_not_reach_here();
5649
5650 bind(ok);
5651 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5652 }
5653 #endif
5654 }
5655
5656 // Writes to stack successive pages until offset reached to check for
5657 // stack overflow + shadow pages. This clobbers tmp.
5658 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5659 assert_different_registers(tmp, size, rscratch1);
5660 mov(tmp, sp);
5661 // Bang stack for total size given plus shadow page size.
5662 // Bang one page at a time because large size can bang beyond yellow and
5663 // red zones.
5664 Label loop;
5665 mov(rscratch1, (int)os::vm_page_size());
5666 bind(loop);
5667 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5668 subsw(size, size, rscratch1);
5669 str(size, Address(tmp));
5670 br(Assembler::GT, loop);
5671
5672 // Bang down shadow pages too.
5673 // At this point, (tmp-0) is the last address touched, so don't
5674 // touch it again. (It was touched as (tmp-pagesize) but then tmp
5675 // was post-decremented.) Skip this address by starting at i=1, and
5676 // touch a few more pages below. N.B. It is important to touch all
5677 // the way down to and including i=StackShadowPages.
5678 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
5679 // this could be any sized move but this is can be a debugging crumb
5680 // so the bigger the better.
5681 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5682 str(size, Address(tmp));
5683 }
5684 }
5685
5686 // Move the address of the polling page into dest.
5687 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
5688 ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
5689 }
5690
5691 // Read the polling page. The address of the polling page must
5692 // already be in r.
5693 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
5694 address mark;
5695 {
5696 InstructionMark im(this);
5697 code_section()->relocate(inst_mark(), rtype);
5698 ldrw(zr, Address(r, 0));
5699 mark = inst_mark();
5700 }
5701 verify_cross_modify_fence_not_required();
5702 return mark;
5703 }
5704
5705 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
5706 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
5707 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
5708 uint64_t dest_page = (uint64_t)dest.target() >> 12;
5709 int64_t offset_low = dest_page - low_page;
5710 int64_t offset_high = dest_page - high_page;
5711
5712 assert(is_valid_AArch64_address(dest.target()), "bad address");
5713 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
5714
5715 InstructionMark im(this);
5716 code_section()->relocate(inst_mark(), dest.rspec());
5717 // 8143067: Ensure that the adrp can reach the dest from anywhere within
5718 // the code cache so that if it is relocated we know it will still reach
5719 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5720 _adrp(reg1, dest.target());
5721 } else {
5722 uint64_t target = (uint64_t)dest.target();
5723 uint64_t adrp_target
5724 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5725
5726 _adrp(reg1, (address)adrp_target);
5727 movk(reg1, target >> 32, 32);
5728 }
5729 byte_offset = (uint64_t)dest.target() & 0xfff;
5730 }
5731
5732 void MacroAssembler::load_byte_map_base(Register reg) {
5733 #if INCLUDE_CDS
5734 if (AOTCodeCache::is_on_for_dump()) {
5735 address byte_map_base_adr = AOTRuntimeConstants::card_table_base_address();
5736 lea(reg, ExternalAddress(byte_map_base_adr));
5737 ldr(reg, Address(reg));
5738 return;
5739 }
5740 #endif
5741 CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
5742
5743 // Strictly speaking the card table base isn't an address at all, and it might
5744 // even be negative. It is thus materialised as a constant.
5745 mov(reg, (uint64_t)ctbs->card_table_base_const());
5746 }
5747
5748 void MacroAssembler::load_aotrc_address(Register reg, address a) {
5749 #if INCLUDE_CDS
5750 assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
5751 if (AOTCodeCache::is_on_for_dump()) {
5752 // all aotrc field addresses should be registered in the AOTCodeCache address table
5753 lea(reg, ExternalAddress(a));
5754 } else {
5755 mov(reg, (uint64_t)a);
5756 }
5757 #else
5758 ShouldNotReachHere();
5759 #endif
5760 }
5761
5762 void MacroAssembler::build_frame(int framesize) {
5763 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5764 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5765 protect_return_address();
5766 if (framesize < ((1 << 9) + 2 * wordSize)) {
5767 sub(sp, sp, framesize);
5768 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5769 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5770 } else {
5771 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5772 if (PreserveFramePointer) mov(rfp, sp);
5773 if (framesize < ((1 << 12) + 2 * wordSize))
5774 sub(sp, sp, framesize - 2 * wordSize);
5775 else {
5776 mov(rscratch1, framesize - 2 * wordSize);
5777 sub(sp, sp, rscratch1);
5778 }
5779 }
5780 verify_cross_modify_fence_not_required();
5781 }
5782
5783 void MacroAssembler::remove_frame(int framesize) {
5784 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5785 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5786 if (framesize < ((1 << 9) + 2 * wordSize)) {
5787 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5788 add(sp, sp, framesize);
5789 } else {
5790 if (framesize < ((1 << 12) + 2 * wordSize))
5791 add(sp, sp, framesize - 2 * wordSize);
5792 else {
5793 mov(rscratch1, framesize - 2 * wordSize);
5794 add(sp, sp, rscratch1);
5795 }
5796 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5797 }
5798 authenticate_return_address();
5799 }
5800
5801
5802 // This method counts leading positive bytes (highest bit not set) in provided byte array
5803 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5804 // Simple and most common case of aligned small array which is not at the
5805 // end of memory page is placed here. All other cases are in stub.
5806 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5807 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
5808 assert_different_registers(ary1, len, result);
5809
5810 mov(result, len);
5811 cmpw(len, 0);
5812 br(LE, DONE);
5813 cmpw(len, 4 * wordSize);
5814 br(GE, STUB_LONG); // size > 32 then go to stub
5815
5816 int shift = 64 - exact_log2(os::vm_page_size());
5817 lsl(rscratch1, ary1, shift);
5818 mov(rscratch2, (size_t)(4 * wordSize) << shift);
5819 adds(rscratch2, rscratch1, rscratch2); // At end of page?
5820 br(CS, STUB); // at the end of page then go to stub
5821 subs(len, len, wordSize);
5822 br(LT, END);
5823
5824 BIND(LOOP);
5825 ldr(rscratch1, Address(post(ary1, wordSize)));
5826 tst(rscratch1, UPPER_BIT_MASK);
5827 br(NE, SET_RESULT);
5828 subs(len, len, wordSize);
5829 br(GE, LOOP);
5830 cmpw(len, -wordSize);
5831 br(EQ, DONE);
5832
5833 BIND(END);
5834 ldr(rscratch1, Address(ary1));
5835 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
5836 lslv(rscratch1, rscratch1, rscratch2);
5837 tst(rscratch1, UPPER_BIT_MASK);
5838 br(NE, SET_RESULT);
5839 b(DONE);
5840
5841 BIND(STUB);
5842 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
5843 assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
5844 address tpc1 = trampoline_call(count_pos);
5845 if (tpc1 == nullptr) {
5846 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
5847 postcond(pc() == badAddress);
5848 return nullptr;
5849 }
5850 b(DONE);
5851
5852 BIND(STUB_LONG);
5853 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
5854 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
5855 address tpc2 = trampoline_call(count_pos_long);
5856 if (tpc2 == nullptr) {
5857 DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
5858 postcond(pc() == badAddress);
5859 return nullptr;
5860 }
5861 b(DONE);
5862
5863 BIND(SET_RESULT);
5864
5865 add(len, len, wordSize);
5866 sub(result, result, len);
5867
5868 BIND(DONE);
5869 postcond(pc() != badAddress);
5870 return pc();
5871 }
5872
5873 // Clobbers: rscratch1, rscratch2, rflags
5874 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
5875 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
5876 Register tmp4, Register tmp5, Register result,
5877 Register cnt1, int elem_size) {
5878 Label DONE, SAME;
5879 Register tmp1 = rscratch1;
5880 Register tmp2 = rscratch2;
5881 int elem_per_word = wordSize/elem_size;
5882 int log_elem_size = exact_log2(elem_size);
5883 int klass_offset = arrayOopDesc::klass_offset_in_bytes();
5884 int length_offset = arrayOopDesc::length_offset_in_bytes();
5885 int base_offset
5886 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
5887 // When the length offset is not aligned to 8 bytes,
5888 // then we align it down. This is valid because the new
5889 // offset will always be the klass which is the same
5890 // for type arrays.
5891 int start_offset = align_down(length_offset, BytesPerWord);
5892 int extra_length = base_offset - start_offset;
5893 assert(start_offset == length_offset || start_offset == klass_offset,
5894 "start offset must be 8-byte-aligned or be the klass offset");
5895 assert(base_offset != start_offset, "must include the length field");
5896 extra_length = extra_length / elem_size; // We count in elements, not bytes.
5897 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
5898
5899 assert(elem_size == 1 || elem_size == 2, "must be char or byte");
5900 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
5901
5902 #ifndef PRODUCT
5903 {
5904 const char kind = (elem_size == 2) ? 'U' : 'L';
5905 char comment[64];
5906 os::snprintf_checked(comment, sizeof comment, "array_equals%c{", kind);
5907 BLOCK_COMMENT(comment);
5908 }
5909 #endif
5910
5911 // if (a1 == a2)
5912 // return true;
5913 cmpoop(a1, a2); // May have read barriers for a1 and a2.
5914 br(EQ, SAME);
5915
5916 if (UseSimpleArrayEquals) {
5917 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
5918 // if (a1 == nullptr || a2 == nullptr)
5919 // return false;
5920 // a1 & a2 == 0 means (some-pointer is null) or
5921 // (very-rare-or-even-probably-impossible-pointer-values)
5922 // so, we can save one branch in most cases
5923 tst(a1, a2);
5924 mov(result, false);
5925 br(EQ, A_MIGHT_BE_NULL);
5926 // if (a1.length != a2.length)
5927 // return false;
5928 bind(A_IS_NOT_NULL);
5929 ldrw(cnt1, Address(a1, length_offset));
5930 ldrw(tmp5, Address(a2, length_offset));
5931 cmp(cnt1, tmp5);
5932 br(NE, DONE); // If lengths differ, return false
5933 // Increase loop counter by diff between base- and actual start-offset.
5934 addw(cnt1, cnt1, extra_length);
5935 lea(a1, Address(a1, start_offset));
5936 lea(a2, Address(a2, start_offset));
5937 // Check for short strings, i.e. smaller than wordSize.
5938 subs(cnt1, cnt1, elem_per_word);
5939 br(Assembler::LT, SHORT);
5940 // Main 8 byte comparison loop.
5941 bind(NEXT_WORD); {
5942 ldr(tmp1, Address(post(a1, wordSize)));
5943 ldr(tmp2, Address(post(a2, wordSize)));
5944 subs(cnt1, cnt1, elem_per_word);
5945 eor(tmp5, tmp1, tmp2);
5946 cbnz(tmp5, DONE);
5947 } br(GT, NEXT_WORD);
5948 // Last longword. In the case where length == 4 we compare the
5949 // same longword twice, but that's still faster than another
5950 // conditional branch.
5951 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
5952 // length == 4.
5953 if (log_elem_size > 0)
5954 lsl(cnt1, cnt1, log_elem_size);
5955 ldr(tmp3, Address(a1, cnt1));
5956 ldr(tmp4, Address(a2, cnt1));
5957 eor(tmp5, tmp3, tmp4);
5958 cbnz(tmp5, DONE);
5959 b(SAME);
5960 bind(A_MIGHT_BE_NULL);
5961 // in case both a1 and a2 are not-null, proceed with loads
5962 cbz(a1, DONE);
5963 cbz(a2, DONE);
5964 b(A_IS_NOT_NULL);
5965 bind(SHORT);
5966
5967 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
5968 {
5969 ldrw(tmp1, Address(post(a1, 4)));
5970 ldrw(tmp2, Address(post(a2, 4)));
5971 eorw(tmp5, tmp1, tmp2);
5972 cbnzw(tmp5, DONE);
5973 }
5974 bind(TAIL03);
5975 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
5976 {
5977 ldrh(tmp3, Address(post(a1, 2)));
5978 ldrh(tmp4, Address(post(a2, 2)));
5979 eorw(tmp5, tmp3, tmp4);
5980 cbnzw(tmp5, DONE);
5981 }
5982 bind(TAIL01);
5983 if (elem_size == 1) { // Only needed when comparing byte arrays.
5984 tbz(cnt1, 0, SAME); // 0-1 bytes left.
5985 {
5986 ldrb(tmp1, a1);
5987 ldrb(tmp2, a2);
5988 eorw(tmp5, tmp1, tmp2);
5989 cbnzw(tmp5, DONE);
5990 }
5991 }
5992 } else {
5993 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
5994 CSET_EQ, LAST_CHECK;
5995 mov(result, false);
5996 cbz(a1, DONE);
5997 ldrw(cnt1, Address(a1, length_offset));
5998 cbz(a2, DONE);
5999 ldrw(tmp5, Address(a2, length_offset));
6000 cmp(cnt1, tmp5);
6001 br(NE, DONE); // If lengths differ, return false
6002 // Increase loop counter by diff between base- and actual start-offset.
6003 addw(cnt1, cnt1, extra_length);
6004
6005 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's
6006 // faster to perform another branch before comparing a1 and a2
6007 cmp(cnt1, (u1)elem_per_word);
6008 br(LE, SHORT); // short or same
6009 ldr(tmp3, Address(pre(a1, start_offset)));
6010 subs(zr, cnt1, stubBytesThreshold);
6011 br(GE, STUB);
6012 ldr(tmp4, Address(pre(a2, start_offset)));
6013 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6014
6015 // Main 16 byte comparison loop with 2 exits
6016 bind(NEXT_DWORD); {
6017 ldr(tmp1, Address(pre(a1, wordSize)));
6018 ldr(tmp2, Address(pre(a2, wordSize)));
6019 subs(cnt1, cnt1, 2 * elem_per_word);
6020 br(LE, TAIL);
6021 eor(tmp4, tmp3, tmp4);
6022 cbnz(tmp4, DONE);
6023 ldr(tmp3, Address(pre(a1, wordSize)));
6024 ldr(tmp4, Address(pre(a2, wordSize)));
6025 cmp(cnt1, (u1)elem_per_word);
6026 br(LE, TAIL2);
6027 cmp(tmp1, tmp2);
6028 } br(EQ, NEXT_DWORD);
6029 b(DONE);
6030
6031 bind(TAIL);
6032 eor(tmp4, tmp3, tmp4);
6033 eor(tmp2, tmp1, tmp2);
6034 lslv(tmp2, tmp2, tmp5);
6035 orr(tmp5, tmp4, tmp2);
6036 cmp(tmp5, zr);
6037 b(CSET_EQ);
6038
6039 bind(TAIL2);
6040 eor(tmp2, tmp1, tmp2);
6041 cbnz(tmp2, DONE);
6042 b(LAST_CHECK);
6043
6044 bind(STUB);
6045 ldr(tmp4, Address(pre(a2, start_offset)));
6046 if (elem_size == 2) { // convert to byte counter
6047 lsl(cnt1, cnt1, 1);
6048 }
6049 eor(tmp5, tmp3, tmp4);
6050 cbnz(tmp5, DONE);
6051 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
6052 assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
6053 address tpc = trampoline_call(stub);
6054 if (tpc == nullptr) {
6055 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
6056 postcond(pc() == badAddress);
6057 return nullptr;
6058 }
6059 b(DONE);
6060
6061 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
6062 // so, if a2 == null => return false(0), else return true, so we can return a2
6063 mov(result, a2);
6064 b(DONE);
6065 bind(SHORT);
6066 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6067 ldr(tmp3, Address(a1, start_offset));
6068 ldr(tmp4, Address(a2, start_offset));
6069 bind(LAST_CHECK);
6070 eor(tmp4, tmp3, tmp4);
6071 lslv(tmp5, tmp4, tmp5);
6072 cmp(tmp5, zr);
6073 bind(CSET_EQ);
6074 cset(result, EQ);
6075 b(DONE);
6076 }
6077
6078 bind(SAME);
6079 mov(result, true);
6080 // That's it.
6081 bind(DONE);
6082
6083 BLOCK_COMMENT("} array_equals");
6084 postcond(pc() != badAddress);
6085 return pc();
6086 }
6087
6088 // Compare Strings
6089
6090 // For Strings we're passed the address of the first characters in a1
6091 // and a2 and the length in cnt1.
6092 // There are two implementations. For arrays >= 8 bytes, all
6093 // comparisons (including the final one, which may overlap) are
6094 // performed 8 bytes at a time. For strings < 8 bytes, we compare a
6095 // halfword, then a short, and then a byte.
6096
6097 void MacroAssembler::string_equals(Register a1, Register a2,
6098 Register result, Register cnt1)
6099 {
6100 Label SAME, DONE, SHORT, NEXT_WORD;
6101 Register tmp1 = rscratch1;
6102 Register tmp2 = rscratch2;
6103
6104 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6105
6106 #ifndef PRODUCT
6107 {
6108 char comment[64];
6109 os::snprintf_checked(comment, sizeof comment, "{string_equalsL");
6110 BLOCK_COMMENT(comment);
6111 }
6112 #endif
6113
6114 mov(result, false);
6115
6116 // Check for short strings, i.e. smaller than wordSize.
6117 subs(cnt1, cnt1, wordSize);
6118 br(Assembler::LT, SHORT);
6119 // Main 8 byte comparison loop.
6120 bind(NEXT_WORD); {
6121 ldr(tmp1, Address(post(a1, wordSize)));
6122 ldr(tmp2, Address(post(a2, wordSize)));
6123 subs(cnt1, cnt1, wordSize);
6124 eor(tmp1, tmp1, tmp2);
6125 cbnz(tmp1, DONE);
6126 } br(GT, NEXT_WORD);
6127 // Last longword. In the case where length == 4 we compare the
6128 // same longword twice, but that's still faster than another
6129 // conditional branch.
6130 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6131 // length == 4.
6132 ldr(tmp1, Address(a1, cnt1));
6133 ldr(tmp2, Address(a2, cnt1));
6134 eor(tmp2, tmp1, tmp2);
6135 cbnz(tmp2, DONE);
6136 b(SAME);
6137
6138 bind(SHORT);
6139 Label TAIL03, TAIL01;
6140
6141 tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
6142 {
6143 ldrw(tmp1, Address(post(a1, 4)));
6144 ldrw(tmp2, Address(post(a2, 4)));
6145 eorw(tmp1, tmp1, tmp2);
6146 cbnzw(tmp1, DONE);
6147 }
6148 bind(TAIL03);
6149 tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
6150 {
6151 ldrh(tmp1, Address(post(a1, 2)));
6152 ldrh(tmp2, Address(post(a2, 2)));
6153 eorw(tmp1, tmp1, tmp2);
6154 cbnzw(tmp1, DONE);
6155 }
6156 bind(TAIL01);
6157 tbz(cnt1, 0, SAME); // 0-1 bytes left.
6158 {
6159 ldrb(tmp1, a1);
6160 ldrb(tmp2, a2);
6161 eorw(tmp1, tmp1, tmp2);
6162 cbnzw(tmp1, DONE);
6163 }
6164 // Arrays are equal.
6165 bind(SAME);
6166 mov(result, true);
6167
6168 // That's it.
6169 bind(DONE);
6170 BLOCK_COMMENT("} string_equals");
6171 }
6172
6173
6174 // The size of the blocks erased by the zero_blocks stub. We must
6175 // handle anything smaller than this ourselves in zero_words().
6176 const int MacroAssembler::zero_words_block_size = 8;
6177
6178 // zero_words() is used by C2 ClearArray patterns and by
6179 // C1_MacroAssembler. It is as small as possible, handling small word
6180 // counts locally and delegating anything larger to the zero_blocks
6181 // stub. It is expanded many times in compiled code, so it is
6182 // important to keep it short.
6183
6184 // ptr: Address of a buffer to be zeroed.
6185 // cnt: Count in HeapWords.
6186 //
6187 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
6188 address MacroAssembler::zero_words(Register ptr, Register cnt)
6189 {
6190 assert(is_power_of_2(zero_words_block_size), "adjust this");
6191
6192 BLOCK_COMMENT("zero_words {");
6193 assert(ptr == r10 && cnt == r11, "mismatch in register usage");
6194 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
6195 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
6196
6197 subs(rscratch1, cnt, zero_words_block_size);
6198 Label around;
6199 br(LO, around);
6200 {
6201 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
6202 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
6203 // Make sure this is a C2 compilation. C1 allocates space only for
6204 // trampoline stubs generated by Call LIR ops, and in any case it
6205 // makes sense for a C1 compilation task to proceed as quickly as
6206 // possible.
6207 CompileTask* task;
6208 if (StubRoutines::aarch64::complete()
6209 && Thread::current()->is_Compiler_thread()
6210 && (task = ciEnv::current()->task())
6211 && is_c2_compile(task->comp_level())) {
6212 address tpc = trampoline_call(zero_blocks);
6213 if (tpc == nullptr) {
6214 DEBUG_ONLY(reset_labels(around));
6215 return nullptr;
6216 }
6217 } else {
6218 far_call(zero_blocks);
6219 }
6220 }
6221 bind(around);
6222
6223 // We have a few words left to do. zero_blocks has adjusted r10 and r11
6224 // for us.
6225 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
6226 Label l;
6227 tbz(cnt, exact_log2(i), l);
6228 for (int j = 0; j < i; j += 2) {
6229 stp(zr, zr, post(ptr, 2 * BytesPerWord));
6230 }
6231 bind(l);
6232 }
6233 {
6234 Label l;
6235 tbz(cnt, 0, l);
6236 str(zr, Address(ptr));
6237 bind(l);
6238 }
6239
6240 BLOCK_COMMENT("} zero_words");
6241 return pc();
6242 }
6243
6244 // base: Address of a buffer to be zeroed, 8 bytes aligned.
6245 // cnt: Immediate count in HeapWords.
6246 //
6247 // r10, r11, rscratch1, and rscratch2 are clobbered.
6248 address MacroAssembler::zero_words(Register base, uint64_t cnt)
6249 {
6250 assert(wordSize <= BlockZeroingLowLimit,
6251 "increase BlockZeroingLowLimit");
6252 address result = nullptr;
6253 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
6254 #ifndef PRODUCT
6255 {
6256 char buf[64];
6257 os::snprintf_checked(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
6258 BLOCK_COMMENT(buf);
6259 }
6260 #endif
6261 if (cnt >= 16) {
6262 uint64_t loops = cnt/16;
6263 if (loops > 1) {
6264 mov(rscratch2, loops - 1);
6265 }
6266 {
6267 Label loop;
6268 bind(loop);
6269 for (int i = 0; i < 16; i += 2) {
6270 stp(zr, zr, Address(base, i * BytesPerWord));
6271 }
6272 add(base, base, 16 * BytesPerWord);
6273 if (loops > 1) {
6274 subs(rscratch2, rscratch2, 1);
6275 br(GE, loop);
6276 }
6277 }
6278 }
6279 cnt %= 16;
6280 int i = cnt & 1; // store any odd word to start
6281 if (i) str(zr, Address(base));
6282 for (; i < (int)cnt; i += 2) {
6283 stp(zr, zr, Address(base, i * wordSize));
6284 }
6285 BLOCK_COMMENT("} zero_words");
6286 result = pc();
6287 } else {
6288 mov(r10, base); mov(r11, cnt);
6289 result = zero_words(r10, r11);
6290 }
6291 return result;
6292 }
6293
6294 // Zero blocks of memory by using DC ZVA.
6295 //
6296 // Aligns the base address first sufficiently for DC ZVA, then uses
6297 // DC ZVA repeatedly for every full block. cnt is the size to be
6298 // zeroed in HeapWords. Returns the count of words left to be zeroed
6299 // in cnt.
6300 //
6301 // NOTE: This is intended to be used in the zero_blocks() stub. If
6302 // you want to use it elsewhere, note that cnt must be >= 2*zva_length.
6303 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
6304 Register tmp = rscratch1;
6305 Register tmp2 = rscratch2;
6306 int zva_length = VM_Version::zva_length();
6307 Label initial_table_end, loop_zva;
6308 Label fini;
6309
6310 // Base must be 16 byte aligned. If not just return and let caller handle it
6311 tst(base, 0x0f);
6312 br(Assembler::NE, fini);
6313 // Align base with ZVA length.
6314 neg(tmp, base);
6315 andr(tmp, tmp, zva_length - 1);
6316
6317 // tmp: the number of bytes to be filled to align the base with ZVA length.
6318 add(base, base, tmp);
6319 sub(cnt, cnt, tmp, Assembler::ASR, 3);
6320 adr(tmp2, initial_table_end);
6321 sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
6322 br(tmp2);
6323
6324 for (int i = -zva_length + 16; i < 0; i += 16)
6325 stp(zr, zr, Address(base, i));
6326 bind(initial_table_end);
6327
6328 sub(cnt, cnt, zva_length >> 3);
6329 bind(loop_zva);
6330 dc(Assembler::ZVA, base);
6331 subs(cnt, cnt, zva_length >> 3);
6332 add(base, base, zva_length);
6333 br(Assembler::GE, loop_zva);
6334 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
6335 bind(fini);
6336 }
6337
6338 // base: Address of a buffer to be filled, 8 bytes aligned.
6339 // cnt: Count in 8-byte unit.
6340 // value: Value to be filled with.
6341 // base will point to the end of the buffer after filling.
6342 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
6343 {
6344 // Algorithm:
6345 //
6346 // if (cnt == 0) {
6347 // return;
6348 // }
6349 // if ((p & 8) != 0) {
6350 // *p++ = v;
6351 // }
6352 //
6353 // scratch1 = cnt & 14;
6354 // cnt -= scratch1;
6355 // p += scratch1;
6356 // switch (scratch1 / 2) {
6357 // do {
6358 // cnt -= 16;
6359 // p[-16] = v;
6360 // p[-15] = v;
6361 // case 7:
6362 // p[-14] = v;
6363 // p[-13] = v;
6364 // case 6:
6365 // p[-12] = v;
6366 // p[-11] = v;
6367 // // ...
6368 // case 1:
6369 // p[-2] = v;
6370 // p[-1] = v;
6371 // case 0:
6372 // p += 16;
6373 // } while (cnt);
6374 // }
6375 // if ((cnt & 1) == 1) {
6376 // *p++ = v;
6377 // }
6378
6379 assert_different_registers(base, cnt, value, rscratch1, rscratch2);
6380
6381 Label fini, skip, entry, loop;
6382 const int unroll = 8; // Number of stp instructions we'll unroll
6383
6384 cbz(cnt, fini);
6385 tbz(base, 3, skip);
6386 str(value, Address(post(base, 8)));
6387 sub(cnt, cnt, 1);
6388 bind(skip);
6389
6390 andr(rscratch1, cnt, (unroll-1) * 2);
6391 sub(cnt, cnt, rscratch1);
6392 add(base, base, rscratch1, Assembler::LSL, 3);
6393 adr(rscratch2, entry);
6394 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
6395 br(rscratch2);
6396
6397 bind(loop);
6398 add(base, base, unroll * 16);
6399 for (int i = -unroll; i < 0; i++)
6400 stp(value, value, Address(base, i * 16));
6401 bind(entry);
6402 subs(cnt, cnt, unroll * 2);
6403 br(Assembler::GE, loop);
6404
6405 tbz(cnt, 0, fini);
6406 str(value, Address(post(base, 8)));
6407 bind(fini);
6408 }
6409
6410 // Intrinsic for
6411 //
6412 // - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6413 // Encodes char[] to byte[] in ISO-8859-1
6414 //
6415 // - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6416 // Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
6417 //
6418 // - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6419 // Encodes char[] to byte[] in ASCII
6420 //
6421 // This version always returns the number of characters copied, and does not
6422 // clobber the 'len' register. A successful copy will complete with the post-
6423 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
6424 // post-condition: 0 <= 'res' < 'len'.
6425 //
6426 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) has proven to
6427 // degrade performance (on Ampere Altra - Neoverse N1), to an extent
6428 // beyond the acceptable, even though the footprint would be smaller.
6429 // Using 'umaxv' in the ASCII-case comes with a small penalty but does
6430 // avoid additional bloat.
6431 //
6432 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags
6433 void MacroAssembler::encode_iso_array(Register src, Register dst,
6434 Register len, Register res, bool ascii,
6435 FloatRegister vtmp0, FloatRegister vtmp1,
6436 FloatRegister vtmp2, FloatRegister vtmp3,
6437 FloatRegister vtmp4, FloatRegister vtmp5)
6438 {
6439 Register cnt = res;
6440 Register max = rscratch1;
6441 Register chk = rscratch2;
6442
6443 prfm(Address(src), PLDL1STRM);
6444 movw(cnt, len);
6445
6446 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
6447
6448 Label LOOP_32, DONE_32, FAIL_32;
6449
6450 BIND(LOOP_32);
6451 {
6452 cmpw(cnt, 32);
6453 br(LT, DONE_32);
6454 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
6455 // Extract lower bytes.
6456 FloatRegister vlo0 = vtmp4;
6457 FloatRegister vlo1 = vtmp5;
6458 uzp1(vlo0, T16B, vtmp0, vtmp1);
6459 uzp1(vlo1, T16B, vtmp2, vtmp3);
6460 // Merge bits...
6461 orr(vtmp0, T16B, vtmp0, vtmp1);
6462 orr(vtmp2, T16B, vtmp2, vtmp3);
6463 // Extract merged upper bytes.
6464 FloatRegister vhix = vtmp0;
6465 uzp2(vhix, T16B, vtmp0, vtmp2);
6466 // ISO-check on hi-parts (all zero).
6467 // ASCII-check on lo-parts (no sign).
6468 FloatRegister vlox = vtmp1; // Merge lower bytes.
6469 ASCII(orr(vlox, T16B, vlo0, vlo1));
6470 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox));
6471 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox));
6472 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0));
6473 ASCII(orr(chk, chk, max));
6474 cbnz(chk, FAIL_32);
6475 subw(cnt, cnt, 32);
6476 st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
6477 b(LOOP_32);
6478 }
6479 BIND(FAIL_32);
6480 sub(src, src, 64);
6481 BIND(DONE_32);
6482
6483 Label LOOP_8, SKIP_8;
6484
6485 BIND(LOOP_8);
6486 {
6487 cmpw(cnt, 8);
6488 br(LT, SKIP_8);
6489 FloatRegister vhi = vtmp0;
6490 FloatRegister vlo = vtmp1;
6491 ld1(vtmp3, T8H, src);
6492 uzp1(vlo, T16B, vtmp3, vtmp3);
6493 uzp2(vhi, T16B, vtmp3, vtmp3);
6494 // ISO-check on hi-parts (all zero).
6495 // ASCII-check on lo-parts (no sign).
6496 ASCII(cm(LT, vtmp2, T16B, vlo));
6497 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2));
6498 ASCII(umov(max, vtmp2, B, 0));
6499 ASCII(orr(chk, chk, max));
6500 cbnz(chk, SKIP_8);
6501
6502 strd(vlo, Address(post(dst, 8)));
6503 subw(cnt, cnt, 8);
6504 add(src, src, 16);
6505 b(LOOP_8);
6506 }
6507 BIND(SKIP_8);
6508
6509 #undef ASCII
6510
6511 Label LOOP, DONE;
6512
6513 cbz(cnt, DONE);
6514 BIND(LOOP);
6515 {
6516 Register chr = rscratch1;
6517 ldrh(chr, Address(post(src, 2)));
6518 tst(chr, ascii ? 0xff80 : 0xff00);
6519 br(NE, DONE);
6520 strb(chr, Address(post(dst, 1)));
6521 subs(cnt, cnt, 1);
6522 br(GT, LOOP);
6523 }
6524 BIND(DONE);
6525 // Return index where we stopped.
6526 subw(res, len, cnt);
6527 }
6528
6529 // Inflate byte[] array to char[].
6530 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6
6531 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
6532 FloatRegister vtmp1, FloatRegister vtmp2,
6533 FloatRegister vtmp3, Register tmp4) {
6534 Label big, done, after_init, to_stub;
6535
6536 assert_different_registers(src, dst, len, tmp4, rscratch1);
6537
6538 fmovd(vtmp1, 0.0);
6539 lsrw(tmp4, len, 3);
6540 bind(after_init);
6541 cbnzw(tmp4, big);
6542 // Short string: less than 8 bytes.
6543 {
6544 Label loop, tiny;
6545
6546 cmpw(len, 4);
6547 br(LT, tiny);
6548 // Use SIMD to do 4 bytes.
6549 ldrs(vtmp2, post(src, 4));
6550 zip1(vtmp3, T8B, vtmp2, vtmp1);
6551 subw(len, len, 4);
6552 strd(vtmp3, post(dst, 8));
6553
6554 cbzw(len, done);
6555
6556 // Do the remaining bytes by steam.
6557 bind(loop);
6558 ldrb(tmp4, post(src, 1));
6559 strh(tmp4, post(dst, 2));
6560 subw(len, len, 1);
6561
6562 bind(tiny);
6563 cbnz(len, loop);
6564
6565 b(done);
6566 }
6567
6568 if (SoftwarePrefetchHintDistance >= 0) {
6569 bind(to_stub);
6570 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
6571 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
6572 address tpc = trampoline_call(stub);
6573 if (tpc == nullptr) {
6574 DEBUG_ONLY(reset_labels(big, done));
6575 postcond(pc() == badAddress);
6576 return nullptr;
6577 }
6578 b(after_init);
6579 }
6580
6581 // Unpack the bytes 8 at a time.
6582 bind(big);
6583 {
6584 Label loop, around, loop_last, loop_start;
6585
6586 if (SoftwarePrefetchHintDistance >= 0) {
6587 const int large_loop_threshold = (64 + 16)/8;
6588 ldrd(vtmp2, post(src, 8));
6589 andw(len, len, 7);
6590 cmp(tmp4, (u1)large_loop_threshold);
6591 br(GE, to_stub);
6592 b(loop_start);
6593
6594 bind(loop);
6595 ldrd(vtmp2, post(src, 8));
6596 bind(loop_start);
6597 subs(tmp4, tmp4, 1);
6598 br(EQ, loop_last);
6599 zip1(vtmp2, T16B, vtmp2, vtmp1);
6600 ldrd(vtmp3, post(src, 8));
6601 st1(vtmp2, T8H, post(dst, 16));
6602 subs(tmp4, tmp4, 1);
6603 zip1(vtmp3, T16B, vtmp3, vtmp1);
6604 st1(vtmp3, T8H, post(dst, 16));
6605 br(NE, loop);
6606 b(around);
6607 bind(loop_last);
6608 zip1(vtmp2, T16B, vtmp2, vtmp1);
6609 st1(vtmp2, T8H, post(dst, 16));
6610 bind(around);
6611 cbz(len, done);
6612 } else {
6613 andw(len, len, 7);
6614 bind(loop);
6615 ldrd(vtmp2, post(src, 8));
6616 sub(tmp4, tmp4, 1);
6617 zip1(vtmp3, T16B, vtmp2, vtmp1);
6618 st1(vtmp3, T8H, post(dst, 16));
6619 cbnz(tmp4, loop);
6620 }
6621 }
6622
6623 // Do the tail of up to 8 bytes.
6624 add(src, src, len);
6625 ldrd(vtmp3, Address(src, -8));
6626 add(dst, dst, len, ext::uxtw, 1);
6627 zip1(vtmp3, T16B, vtmp3, vtmp1);
6628 strq(vtmp3, Address(dst, -16));
6629
6630 bind(done);
6631 postcond(pc() != badAddress);
6632 return pc();
6633 }
6634
6635 // Compress char[] array to byte[].
6636 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
6637 // Return the array length if every element in array can be encoded,
6638 // otherwise, the index of first non-latin1 (> 0xff) character.
6639 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
6640 Register res,
6641 FloatRegister tmp0, FloatRegister tmp1,
6642 FloatRegister tmp2, FloatRegister tmp3,
6643 FloatRegister tmp4, FloatRegister tmp5) {
6644 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
6645 }
6646
6647 // java.math.round(double a)
6648 // Returns the closest long to the argument, with ties rounding to
6649 // positive infinity. This requires some fiddling for corner
6650 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
6651 void MacroAssembler::java_round_double(Register dst, FloatRegister src,
6652 FloatRegister ftmp) {
6653 Label DONE;
6654 BLOCK_COMMENT("java_round_double: { ");
6655 fmovd(rscratch1, src);
6656 // Use RoundToNearestTiesAway unless src small and -ve.
6657 fcvtasd(dst, src);
6658 // Test if src >= 0 || abs(src) >= 0x1.0p52
6659 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
6660 mov(rscratch2, julong_cast(0x1.0p52));
6661 cmp(rscratch1, rscratch2);
6662 br(HS, DONE); {
6663 // src < 0 && abs(src) < 0x1.0p52
6664 // src may have a fractional part, so add 0.5
6665 fmovd(ftmp, 0.5);
6666 faddd(ftmp, src, ftmp);
6667 // Convert double to jlong, use RoundTowardsNegative
6668 fcvtmsd(dst, ftmp);
6669 }
6670 bind(DONE);
6671 BLOCK_COMMENT("} java_round_double");
6672 }
6673
6674 void MacroAssembler::java_round_float(Register dst, FloatRegister src,
6675 FloatRegister ftmp) {
6676 Label DONE;
6677 BLOCK_COMMENT("java_round_float: { ");
6678 fmovs(rscratch1, src);
6679 // Use RoundToNearestTiesAway unless src small and -ve.
6680 fcvtassw(dst, src);
6681 // Test if src >= 0 || abs(src) >= 0x1.0p23
6682 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
6683 mov(rscratch2, jint_cast(0x1.0p23f));
6684 cmp(rscratch1, rscratch2);
6685 br(HS, DONE); {
6686 // src < 0 && |src| < 0x1.0p23
6687 // src may have a fractional part, so add 0.5
6688 fmovs(ftmp, 0.5f);
6689 fadds(ftmp, src, ftmp);
6690 // Convert float to jint, use RoundTowardsNegative
6691 fcvtmssw(dst, ftmp);
6692 }
6693 bind(DONE);
6694 BLOCK_COMMENT("} java_round_float");
6695 }
6696
6697 // get_thread() can be called anywhere inside generated code so we
6698 // need to save whatever non-callee save context might get clobbered
6699 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
6700 // the call setup code.
6701 //
6702 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
6703 // On other systems, the helper is a usual C function.
6704 //
6705 void MacroAssembler::get_thread(Register dst) {
6706 RegSet saved_regs =
6707 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
6708 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
6709
6710 protect_return_address();
6711 push(saved_regs, sp);
6712
6713 mov(lr, ExternalAddress(CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)));
6714 blr(lr);
6715 if (dst != c_rarg0) {
6716 mov(dst, c_rarg0);
6717 }
6718
6719 pop(saved_regs, sp);
6720 authenticate_return_address();
6721 }
6722
6723 void MacroAssembler::cache_wb(Address line) {
6724 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
6725 assert(line.index() == noreg, "index should be noreg");
6726 assert(line.offset() == 0, "offset should be 0");
6727 // would like to assert this
6728 // assert(line._ext.shift == 0, "shift should be zero");
6729 if (VM_Version::supports_dcpop()) {
6730 // writeback using clear virtual address to point of persistence
6731 dc(Assembler::CVAP, line.base());
6732 } else {
6733 // no need to generate anything as Unsafe.writebackMemory should
6734 // never invoke this stub
6735 }
6736 }
6737
6738 void MacroAssembler::cache_wbsync(bool is_pre) {
6739 // we only need a barrier post sync
6740 if (!is_pre) {
6741 membar(Assembler::AnyAny);
6742 }
6743 }
6744
6745 void MacroAssembler::verify_sve_vector_length(Register tmp) {
6746 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) {
6747 return;
6748 }
6749 // Make sure that native code does not change SVE vector length.
6750 Label verify_ok;
6751 movw(tmp, zr);
6752 sve_inc(tmp, B);
6753 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
6754 br(EQ, verify_ok);
6755 stop("Error: SVE vector length has changed since jvm startup");
6756 bind(verify_ok);
6757 }
6758
6759 void MacroAssembler::verify_ptrue() {
6760 Label verify_ok;
6761 if (!UseSVE) {
6762 return;
6763 }
6764 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
6765 sve_dec(rscratch1, B);
6766 cbz(rscratch1, verify_ok);
6767 stop("Error: the preserved predicate register (p7) elements are not all true");
6768 bind(verify_ok);
6769 }
6770
6771 void MacroAssembler::safepoint_isb() {
6772 isb();
6773 #ifndef PRODUCT
6774 if (VerifyCrossModifyFence) {
6775 // Clear the thread state.
6776 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
6777 }
6778 #endif
6779 }
6780
6781 #ifndef PRODUCT
6782 void MacroAssembler::verify_cross_modify_fence_not_required() {
6783 if (VerifyCrossModifyFence) {
6784 // Check if thread needs a cross modify fence.
6785 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
6786 Label fence_not_required;
6787 cbz(rscratch1, fence_not_required);
6788 // If it does then fail.
6789 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
6790 mov(c_rarg0, rthread);
6791 blr(rscratch1);
6792 bind(fence_not_required);
6793 }
6794 }
6795 #endif
6796
6797 void MacroAssembler::spin_wait() {
6798 block_comment("spin_wait {");
6799 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
6800 switch (VM_Version::spin_wait_desc().inst()) {
6801 case SpinWait::NOP:
6802 nop();
6803 break;
6804 case SpinWait::ISB:
6805 isb();
6806 break;
6807 case SpinWait::YIELD:
6808 yield();
6809 break;
6810 case SpinWait::SB:
6811 assert(VM_Version::supports_sb(), "current CPU does not support SB instruction");
6812 sb();
6813 break;
6814 case SpinWait::WFET:
6815 spin_wait_wfet(VM_Version::spin_wait_desc().delay());
6816 break;
6817 default:
6818 ShouldNotReachHere();
6819 }
6820 }
6821 block_comment("}");
6822 }
6823
6824 void MacroAssembler::spin_wait_wfet(int delay_ns) {
6825 // The sequence assumes CNTFRQ_EL0 is fixed to 1GHz. The assumption is valid
6826 // starting from Armv8.6, according to the "D12.1.2 The system counter" of the
6827 // Arm Architecture Reference Manual for A-profile architecture version M.a.a.
6828 // This is sufficient because FEAT_WFXT is introduced from Armv8.6.
6829 Register target = rscratch1;
6830 Register current = rscratch2;
6831 get_cntvctss_el0(current);
6832 add(target, current, delay_ns);
6833
6834 Label L_wait_loop;
6835 bind(L_wait_loop);
6836
6837 wfet(target);
6838 get_cntvctss_el0(current);
6839
6840 cmp(current, target);
6841 br(LT, L_wait_loop);
6842
6843 sb();
6844 }
6845
6846 // Stack frame creation/removal
6847
6848 void MacroAssembler::enter(bool strip_ret_addr) {
6849 if (strip_ret_addr) {
6850 // Addresses can only be signed once. If there are multiple nested frames being created
6851 // in the same function, then the return address needs stripping first.
6852 strip_return_address();
6853 }
6854 protect_return_address();
6855 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
6856 mov(rfp, sp);
6857 }
6858
6859 void MacroAssembler::leave() {
6860 mov(sp, rfp);
6861 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6862 authenticate_return_address();
6863 }
6864
6865 // ROP Protection
6866 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
6867 // destroying stack frames or whenever directly loading/storing the LR to memory.
6868 // If ROP protection is not set then these functions are no-ops.
6869 // For more details on PAC see pauth_aarch64.hpp.
6870
6871 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
6872 // Uses value zero as the modifier.
6873 //
6874 void MacroAssembler::protect_return_address() {
6875 if (VM_Version::use_rop_protection()) {
6876 check_return_address();
6877 paciaz();
6878 }
6879 }
6880
6881 // Sign the return value in the given register. Use before updating the LR in the existing stack
6882 // frame for the current function.
6883 // Uses value zero as the modifier.
6884 //
6885 void MacroAssembler::protect_return_address(Register return_reg) {
6886 if (VM_Version::use_rop_protection()) {
6887 check_return_address(return_reg);
6888 paciza(return_reg);
6889 }
6890 }
6891
6892 // Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
6893 // Uses value zero as the modifier.
6894 //
6895 void MacroAssembler::authenticate_return_address() {
6896 if (VM_Version::use_rop_protection()) {
6897 autiaz();
6898 check_return_address();
6899 }
6900 }
6901
6902 // Authenticate the return value in the given register. Use before updating the LR in the existing
6903 // stack frame for the current function.
6904 // Uses value zero as the modifier.
6905 //
6906 void MacroAssembler::authenticate_return_address(Register return_reg) {
6907 if (VM_Version::use_rop_protection()) {
6908 autiza(return_reg);
6909 check_return_address(return_reg);
6910 }
6911 }
6912
6913 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
6914 // there is no guaranteed way of authenticating the LR.
6915 //
6916 void MacroAssembler::strip_return_address() {
6917 if (VM_Version::use_rop_protection()) {
6918 xpaclri();
6919 }
6920 }
6921
6922 #ifndef PRODUCT
6923 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
6924 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point
6925 // it is difficult to debug back to the callee function.
6926 // This function simply loads from the address in the given register.
6927 // Use directly after authentication to catch authentication failures.
6928 // Also use before signing to check that the pointer is valid and hasn't already been signed.
6929 //
6930 void MacroAssembler::check_return_address(Register return_reg) {
6931 if (VM_Version::use_rop_protection()) {
6932 ldr(zr, Address(return_reg));
6933 }
6934 }
6935 #endif
6936
6937 // The java_calling_convention describes stack locations as ideal slots on
6938 // a frame with no abi restrictions. Since we must observe abi restrictions
6939 // (like the placement of the register window) the slots must be biased by
6940 // the following value.
6941 static int reg2offset_in(VMReg r) {
6942 // Account for saved rfp and lr
6943 // This should really be in_preserve_stack_slots
6944 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
6945 }
6946
6947 static int reg2offset_out(VMReg r) {
6948 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
6949 }
6950
6951 // On 64bit we will store integer like items to the stack as
6952 // 64bits items (AArch64 ABI) even though java would only store
6953 // 32bits for a parameter. On 32bit it will simply be 32bits
6954 // So this routine will do 32->32 on 32bit and 32->64 on 64bit
6955 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
6956 if (src.first()->is_stack()) {
6957 if (dst.first()->is_stack()) {
6958 // stack to stack
6959 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6960 str(tmp, Address(sp, reg2offset_out(dst.first())));
6961 } else {
6962 // stack to reg
6963 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
6964 }
6965 } else if (dst.first()->is_stack()) {
6966 // reg to stack
6967 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
6968 } else {
6969 if (dst.first() != src.first()) {
6970 sxtw(dst.first()->as_Register(), src.first()->as_Register());
6971 }
6972 }
6973 }
6974
6975 // An oop arg. Must pass a handle not the oop itself
6976 void MacroAssembler::object_move(
6977 OopMap* map,
6978 int oop_handle_offset,
6979 int framesize_in_slots,
6980 VMRegPair src,
6981 VMRegPair dst,
6982 bool is_receiver,
6983 int* receiver_offset) {
6984
6985 // must pass a handle. First figure out the location we use as a handle
6986
6987 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
6988
6989 // See if oop is null if it is we need no handle
6990
6991 if (src.first()->is_stack()) {
6992
6993 // Oop is already on the stack as an argument
6994 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
6995 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
6996 if (is_receiver) {
6997 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
6998 }
6999
7000 ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
7001 lea(rHandle, Address(rfp, reg2offset_in(src.first())));
7002 // conditionally move a null
7003 cmp(rscratch1, zr);
7004 csel(rHandle, zr, rHandle, Assembler::EQ);
7005 } else {
7006
7007 // Oop is in an a register we must store it to the space we reserve
7008 // on the stack for oop_handles and pass a handle if oop is non-null
7009
7010 const Register rOop = src.first()->as_Register();
7011 int oop_slot;
7012 if (rOop == j_rarg0)
7013 oop_slot = 0;
7014 else if (rOop == j_rarg1)
7015 oop_slot = 1;
7016 else if (rOop == j_rarg2)
7017 oop_slot = 2;
7018 else if (rOop == j_rarg3)
7019 oop_slot = 3;
7020 else if (rOop == j_rarg4)
7021 oop_slot = 4;
7022 else if (rOop == j_rarg5)
7023 oop_slot = 5;
7024 else if (rOop == j_rarg6)
7025 oop_slot = 6;
7026 else {
7027 assert(rOop == j_rarg7, "wrong register");
7028 oop_slot = 7;
7029 }
7030
7031 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
7032 int offset = oop_slot*VMRegImpl::stack_slot_size;
7033
7034 map->set_oop(VMRegImpl::stack2reg(oop_slot));
7035 // Store oop in handle area, may be null
7036 str(rOop, Address(sp, offset));
7037 if (is_receiver) {
7038 *receiver_offset = offset;
7039 }
7040
7041 cmp(rOop, zr);
7042 lea(rHandle, Address(sp, offset));
7043 // conditionally move a null
7044 csel(rHandle, zr, rHandle, Assembler::EQ);
7045 }
7046
7047 // If arg is on the stack then place it otherwise it is already in correct reg.
7048 if (dst.first()->is_stack()) {
7049 str(rHandle, Address(sp, reg2offset_out(dst.first())));
7050 }
7051 }
7052
7053 // A float arg may have to do float reg int reg conversion
7054 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
7055 if (src.first()->is_stack()) {
7056 if (dst.first()->is_stack()) {
7057 ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
7058 strw(tmp, Address(sp, reg2offset_out(dst.first())));
7059 } else {
7060 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7061 }
7062 } else if (src.first() != dst.first()) {
7063 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7064 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7065 else
7066 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7067 }
7068 }
7069
7070 // A long move
7071 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
7072 if (src.first()->is_stack()) {
7073 if (dst.first()->is_stack()) {
7074 // stack to stack
7075 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7076 str(tmp, Address(sp, reg2offset_out(dst.first())));
7077 } else {
7078 // stack to reg
7079 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7080 }
7081 } else if (dst.first()->is_stack()) {
7082 // reg to stack
7083 // Do we really have to sign extend???
7084 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
7085 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7086 } else {
7087 if (dst.first() != src.first()) {
7088 mov(dst.first()->as_Register(), src.first()->as_Register());
7089 }
7090 }
7091 }
7092
7093
7094 // A double move
7095 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
7096 if (src.first()->is_stack()) {
7097 if (dst.first()->is_stack()) {
7098 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7099 str(tmp, Address(sp, reg2offset_out(dst.first())));
7100 } else {
7101 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7102 }
7103 } else if (src.first() != dst.first()) {
7104 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7105 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7106 else
7107 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7108 }
7109 }
7110
7111 // Implements fast-locking.
7112 //
7113 // - obj: the object to be locked
7114 // - t1, t2, t3: temporary registers, will be destroyed
7115 // - slow: branched to if locking fails, absolute offset may larger than 32KB (imm14 encoding).
7116 void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
7117 assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
7118
7119 Label push;
7120 const Register top = t1;
7121 const Register mark = t2;
7122 const Register t = t3;
7123
7124 // Preload the markWord. It is important that this is the first
7125 // instruction emitted as it is part of C1's null check semantics.
7126 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7127
7128 if (UseObjectMonitorTable) {
7129 // Clear cache in case fast locking succeeds or we need to take the slow-path.
7130 str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
7131 }
7132
7133 if (DiagnoseSyncOnValueBasedClasses != 0) {
7134 load_klass(t1, obj);
7135 ldrb(t1, Address(t1, Klass::misc_flags_offset()));
7136 tst(t1, KlassFlags::_misc_is_value_based_class);
7137 br(Assembler::NE, slow);
7138 }
7139
7140 // Check if the lock-stack is full.
7141 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7142 cmpw(top, (unsigned)LockStack::end_offset());
7143 br(Assembler::GE, slow);
7144
7145 // Check for recursion.
7146 subw(t, top, oopSize);
7147 ldr(t, Address(rthread, t));
7148 cmp(obj, t);
7149 br(Assembler::EQ, push);
7150
7151 // Check header for monitor (0b10).
7152 tst(mark, markWord::monitor_value);
7153 br(Assembler::NE, slow);
7154
7155 // Try to lock. Transition lock bits 0b01 => 0b00
7156 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7157 orr(mark, mark, markWord::unlocked_value);
7158 eor(t, mark, markWord::unlocked_value);
7159 cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
7160 /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
7161 br(Assembler::NE, slow);
7162
7163 bind(push);
7164 // After successful lock, push object on lock-stack.
7165 str(obj, Address(rthread, top));
7166 addw(top, top, oopSize);
7167 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7168 }
7169
7170 // Implements fast-unlocking.
7171 //
7172 // - obj: the object to be unlocked
7173 // - t1, t2, t3: temporary registers
7174 // - slow: branched to if unlocking fails, absolute offset may larger than 32KB (imm14 encoding).
7175 void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
7176 // cmpxchg clobbers rscratch1.
7177 assert_different_registers(obj, t1, t2, t3, rscratch1);
7178
7179 #ifdef ASSERT
7180 {
7181 // Check for lock-stack underflow.
7182 Label stack_ok;
7183 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7184 cmpw(t1, (unsigned)LockStack::start_offset());
7185 br(Assembler::GE, stack_ok);
7186 STOP("Lock-stack underflow");
7187 bind(stack_ok);
7188 }
7189 #endif
7190
7191 Label unlocked, push_and_slow;
7192 const Register top = t1;
7193 const Register mark = t2;
7194 const Register t = t3;
7195
7196 // Check if obj is top of lock-stack.
7197 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7198 subw(top, top, oopSize);
7199 ldr(t, Address(rthread, top));
7200 cmp(obj, t);
7201 br(Assembler::NE, slow);
7202
7203 // Pop lock-stack.
7204 DEBUG_ONLY(str(zr, Address(rthread, top));)
7205 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7206
7207 // Check if recursive.
7208 subw(t, top, oopSize);
7209 ldr(t, Address(rthread, t));
7210 cmp(obj, t);
7211 br(Assembler::EQ, unlocked);
7212
7213 // Not recursive. Check header for monitor (0b10).
7214 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7215 tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
7216
7217 #ifdef ASSERT
7218 // Check header not unlocked (0b01).
7219 Label not_unlocked;
7220 tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
7221 stop("fast_unlock already unlocked");
7222 bind(not_unlocked);
7223 #endif
7224
7225 // Try to unlock. Transition lock bits 0b00 => 0b01
7226 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7227 orr(t, mark, markWord::unlocked_value);
7228 cmpxchg(obj, mark, t, Assembler::xword,
7229 /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
7230 br(Assembler::EQ, unlocked);
7231
7232 bind(push_and_slow);
7233 // Restore lock-stack and handle the unlock in runtime.
7234 DEBUG_ONLY(str(obj, Address(rthread, top));)
7235 addw(top, top, oopSize);
7236 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7237 b(slow);
7238
7239 bind(unlocked);
7240 }