1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "ci/ciEnv.hpp"
29 #include "ci/ciInlineKlass.hpp"
30 #include "code/compiledIC.hpp"
31 #include "compiler/compileTask.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "compiler/oopMap.hpp"
34 #include "gc/shared/barrierSet.hpp"
35 #include "gc/shared/barrierSetAssembler.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/cardTable.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "interpreter/bytecodeHistogram.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/interpreterRuntime.hpp"
43 #include "jvm.h"
44 #include "memory/resourceArea.hpp"
45 #include "memory/universe.hpp"
46 #include "nativeInst_aarch64.hpp"
47 #include "oops/accessDecorators.hpp"
48 #include "oops/compressedKlass.inline.hpp"
49 #include "oops/compressedOops.inline.hpp"
50 #include "oops/klass.inline.hpp"
51 #include "oops/resolvedFieldEntry.hpp"
52 #include "runtime/continuation.hpp"
53 #include "runtime/globals.hpp"
54 #include "runtime/icache.hpp"
55 #include "runtime/interfaceSupport.inline.hpp"
56 #include "runtime/javaThread.hpp"
57 #include "runtime/jniHandles.inline.hpp"
58 #include "runtime/sharedRuntime.hpp"
59 #include "runtime/signature_cc.hpp"
60 #include "runtime/stubRoutines.hpp"
61 #include "utilities/globalDefinitions.hpp"
62 #include "utilities/powerOfTwo.hpp"
63 #include "vmreg_aarch64.inline.hpp"
64 #ifdef COMPILER1
65 #include "c1/c1_LIRAssembler.hpp"
66 #endif
67 #ifdef COMPILER2
68 #include "oops/oop.hpp"
69 #include "opto/compile.hpp"
70 #include "opto/node.hpp"
71 #include "opto/output.hpp"
72 #endif
73
74 #include <sys/types.h>
75
76 #ifdef PRODUCT
77 #define BLOCK_COMMENT(str) /* nothing */
78 #else
79 #define BLOCK_COMMENT(str) block_comment(str)
80 #endif
81 #define STOP(str) stop(str);
82 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
83
84 #ifdef ASSERT
85 extern "C" void disnm(intptr_t p);
86 #endif
87 // Target-dependent relocation processing
88 //
89 // Instruction sequences whose target may need to be retrieved or
90 // patched are distinguished by their leading instruction, sorting
91 // them into three main instruction groups and related subgroups.
92 //
93 // 1) Branch, Exception and System (insn count = 1)
94 // 1a) Unconditional branch (immediate):
//      b/bl imm26
96 // 1b) Compare & branch (immediate):
97 // cbz/cbnz Rt imm19
98 // 1c) Test & branch (immediate):
99 // tbz/tbnz Rt imm14
100 // 1d) Conditional branch (immediate):
101 // b.cond imm19
102 //
103 // 2) Loads and Stores (insn count = 1)
104 // 2a) Load register literal:
105 // ldr Rt imm19
106 //
// 3) Data Processing Immediate (insn count = 1, 2 or 3)
108 // 3a) PC-rel. addressing
109 // adr/adrp Rx imm21; ldr/str Ry Rx #imm12
110 // adr/adrp Rx imm21; add Ry Rx #imm12
111 // adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
112 // adr/adrp Rx imm21
113 // adr/adrp Rx imm21; movk Rx #imm16<<32
114 // adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
115 // The latter form can only happen when the target is an
116 // ExternalAddress, and (by definition) ExternalAddresses don't
117 // move. Because of that property, there is never any need to
118 // patch the last of the three instructions. However,
119 // MacroAssembler::target_addr_for_insn takes all three
120 // instructions into account and returns the correct address.
121 // 3b) Move wide (immediate)
122 // movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
123 //
124 // A switch on a subset of the instruction's bits provides an
125 // efficient dispatch to these subcases.
126 //
127 // insn[28:26] -> main group ('x' == don't care)
128 // 00x -> UNALLOCATED
129 // 100 -> Data Processing Immediate
130 // 101 -> Branch, Exception and System
131 // x1x -> Loads and Stores
132 //
133 // insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
134 // n.b. in some cases extra bits need to be checked to verify the
135 // instruction is as expected
136 //
137 // 1) ... xx101x Branch, Exception and System
138 // 1a) 00___x Unconditional branch (immediate)
139 // 1b) 01___0 Compare & branch (immediate)
140 // 1c) 01___1 Test & branch (immediate)
141 // 1d) 10___0 Conditional branch (immediate)
142 // other Should not happen
143 //
144 // 2) ... xxx1x0 Loads and Stores
145 // 2a) xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
146 // 2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
147 // strictly should be 64 bit non-FP/SIMD i.e.
148 // 0101_000 (i.e. requires insn[31:24] == 01011000)
149 //
150 // 3) ... xx100x Data Processing Immediate
151 // 3a) xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
152 // 3b) xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
153 // strictly should be 64 bit movz #imm16<<0
154 // 110___10100 (i.e. requires insn[31:21] == 11010010100)
155 //
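// RelocActions walks one of the instruction sequences described above,
// dispatching on the leading instruction.  Subclasses either patch the
// sequence to refer to a new target (Patcher) or recover the target it
// currently encodes (AArch64Decoder).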
156 class RelocActions {
157 protected:
158 typedef int (*reloc_insn)(address insn_addr, address &target);
159
160 virtual reloc_insn adrpMem() = 0;
161 virtual reloc_insn adrpAdd() = 0;
162 virtual reloc_insn adrpMovk() = 0;
163
164 const address _insn_addr;
165 const uint32_t _insn;
166
167 static uint32_t insn_at(address insn_addr, int n) {
168 return ((uint32_t*)insn_addr)[n];
169 }
170 uint32_t insn_at(int n) const {
171 return insn_at(_insn_addr, n);
172 }
173
174 public:
175
176 RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
177 RelocActions(address insn_addr, uint32_t insn)
178 : _insn_addr(insn_addr), _insn(insn) {}
179
180 virtual int unconditionalBranch(address insn_addr, address &target) = 0;
181 virtual int conditionalBranch(address insn_addr, address &target) = 0;
182 virtual int testAndBranch(address insn_addr, address &target) = 0;
183 virtual int loadStore(address insn_addr, address &target) = 0;
184 virtual int adr(address insn_addr, address &target) = 0;
185 virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
186 virtual int immediate(address insn_addr, address &target) = 0;
187 virtual void verify(address insn_addr, address &target) = 0;
188
189 int ALWAYSINLINE run(address insn_addr, address &target) {
190 int instructions = 1;
191
192 uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
193 switch(dispatch) {
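      // Unconditional branch (immediate): b/bl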
194 case 0b001010:
195 case 0b001011: {
196 instructions = unconditionalBranch(insn_addr, target);
197 break;
198 }
199 case 0b101010: // Conditional branch (immediate)
200 case 0b011010: { // Compare & branch (immediate)
201 instructions = conditionalBranch(insn_addr, target);
202 break;
203 }
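      // Test & branch (immediate): tbz/tbnz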
204 case 0b011011: {
205 instructions = testAndBranch(insn_addr, target);
206 break;
207 }
208 case 0b001100:
209 case 0b001110:
210 case 0b011100:
211 case 0b011110:
212 case 0b101100:
213 case 0b101110:
214 case 0b111100:
215 case 0b111110: {
216 // load/store
217 if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
218 // Load register (literal)
219 instructions = loadStore(insn_addr, target);
220 break;
221 } else {
222 // nothing to do
223 assert(target == nullptr, "did not expect to relocate target for polling page load");
224 }
225 break;
226 }
227 case 0b001000:
228 case 0b011000:
229 case 0b101000:
230 case 0b111000: {
231 // adr/adrp
232 assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
233 int shift = Instruction_aarch64::extract(_insn, 31, 31);
234 if (shift) {
235 uint32_t insn2 = insn_at(1);
236 if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
237 Instruction_aarch64::extract(_insn, 4, 0) ==
238 Instruction_aarch64::extract(insn2, 9, 5)) {
239 instructions = adrp(insn_addr, target, adrpMem());
240 } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
241 Instruction_aarch64::extract(_insn, 4, 0) ==
242 Instruction_aarch64::extract(insn2, 4, 0)) {
243 instructions = adrp(insn_addr, target, adrpAdd());
244 } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
245 Instruction_aarch64::extract(_insn, 4, 0) ==
246 Instruction_aarch64::extract(insn2, 4, 0)) {
247 instructions = adrp(insn_addr, target, adrpMovk());
248 } else {
249 ShouldNotReachHere();
250 }
251 } else {
252 instructions = adr(insn_addr, target);
253 }
254 break;
255 }
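      // Move wide (immediate): movz; movk; movk -- see movptr()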
256 case 0b001001:
257 case 0b011001:
258 case 0b101001:
259 case 0b111001: {
260 instructions = immediate(insn_addr, target);
261 break;
262 }
263 default: {
264 ShouldNotReachHere();
265 }
266 }
267
268 verify(insn_addr, target);
269 return instructions * NativeInstruction::instruction_size;
270 }
271 };
272
273 class Patcher : public RelocActions {
274 virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
275 virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
276 virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }
277
278 public:
279 Patcher(address insn_addr) : RelocActions(insn_addr) {}
280
281 virtual int unconditionalBranch(address insn_addr, address &target) {
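    // b/bl: signed 26-bit word offset in bits [25:0]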
282 intptr_t offset = (target - insn_addr) >> 2;
283 Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
284 return 1;
285 }
286 virtual int conditionalBranch(address insn_addr, address &target) {
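    // b.cond/cbz/cbnz: signed 19-bit word offset in bits [23:5]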
287 intptr_t offset = (target - insn_addr) >> 2;
288 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
289 return 1;
290 }
291 virtual int testAndBranch(address insn_addr, address &target) {
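    // tbz/tbnz: signed 14-bit word offset in bits [18:5]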
292 intptr_t offset = (target - insn_addr) >> 2;
293 Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
294 return 1;
295 }
296 virtual int loadStore(address insn_addr, address &target) {
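    // ldr (literal): signed 19-bit word offset in bits [23:5]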
297 intptr_t offset = (target - insn_addr) >> 2;
298 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
299 return 1;
300 }
301 virtual int adr(address insn_addr, address &target) {
302 #ifdef ASSERT
303 assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
304 #endif
305 // PC-rel. addressing
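    // Split the byte offset into immlo (bits [30:29]) and immhi (bits [23:5]).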
306 ptrdiff_t offset = target - insn_addr;
307 int offset_lo = offset & 3;
308 offset >>= 2;
309 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
310 Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
311 return 1;
312 }
313 virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
314 int instructions = 1;
315 #ifdef ASSERT
316 assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
317 #endif
318 ptrdiff_t offset = target - insn_addr;
319 instructions = 2;
320 precond(inner != nullptr);
321 // Give the inner reloc a chance to modify the target.
322 address adjusted_target = target;
323 instructions = (*inner)(insn_addr, adjusted_target);
324 uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
325 uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
326 offset = adr_page - pc_page;
327 int offset_lo = offset & 3;
328 offset >>= 2;
329 Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
330 Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
331 return instructions;
332 }
333 static int adrpMem_impl(address insn_addr, address &target) {
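    // The ldr/str immediate is an unsigned 12-bit offset scaled by the access
    // size, so the target must be aligned accordingly (checked below).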
334 uintptr_t dest = (uintptr_t)target;
335 int offset_lo = dest & 0xfff;
336 uint32_t insn2 = insn_at(insn_addr, 1);
337 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
338 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
339 guarantee(((dest >> size) << size) == dest, "misaligned target");
340 return 2;
341 }
342 static int adrpAdd_impl(address insn_addr, address &target) {
343 uintptr_t dest = (uintptr_t)target;
344 int offset_lo = dest & 0xfff;
345 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
346 return 2;
347 }
348 static int adrpMovk_impl(address insn_addr, address &target) {
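    // The movk supplies bits [47:32] of the destination.  Fold the same bits
    // of insn_addr into the adjusted target so that the adrp patched by the
    // caller only needs to cover the low 32 bits of the offset.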
349 uintptr_t dest = uintptr_t(target);
350 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
351 dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
352 target = address(dest);
353 return 2;
354 }
355 virtual int immediate(address insn_addr, address &target) {
356 assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
357 uint64_t dest = (uint64_t)target;
358 // Move wide constant
359 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
360 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
361 Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
362 Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
363 Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
364 return 3;
365 }
366 virtual void verify(address insn_addr, address &target) {
367 #ifdef ASSERT
368 address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
369 if (!(address_is == target)) {
370 tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
371 disnm((intptr_t)insn_addr);
372 assert(address_is == target, "should be");
373 }
374 #endif
375 }
376 };
377
378 // If insn1 and insn2 use the same register to form an address, either
379 // by an offsetted LDR or a simple ADD, return the offset. If the
380 // second instruction is an LDR, the offset may be scaled.
381 static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
382 if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
383 Instruction_aarch64::extract(insn1, 4, 0) ==
384 Instruction_aarch64::extract(insn2, 9, 5)) {
385 // Load/store register (unsigned immediate)
386 byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
387 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
388 byte_offset <<= size;
389 return true;
390 } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
391 Instruction_aarch64::extract(insn1, 4, 0) ==
392 Instruction_aarch64::extract(insn2, 4, 0)) {
393 // add (immediate)
394 byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
395 return true;
396 }
397 return false;
398 }
399
400 class AArch64Decoder : public RelocActions {
401 virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
402 virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
403 virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }
404
405 public:
406 AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}
407
408 virtual int loadStore(address insn_addr, address &target) {
409 intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
410 target = insn_addr + (offset << 2);
411 return 1;
412 }
413 virtual int unconditionalBranch(address insn_addr, address &target) {
414 intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
415 target = insn_addr + (offset << 2);
416 return 1;
417 }
418 virtual int conditionalBranch(address insn_addr, address &target) {
419 intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
420 target = address(((uint64_t)insn_addr + (offset << 2)));
421 return 1;
422 }
423 virtual int testAndBranch(address insn_addr, address &target) {
424 intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
425 target = address(((uint64_t)insn_addr + (offset << 2)));
426 return 1;
427 }
428 virtual int adr(address insn_addr, address &target) {
429 // PC-rel. addressing
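    // The 21-bit immediate is immhi:immlo, with immlo in bits [30:29] and
    // immhi in bits [23:5].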
430 intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
431 offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
432 target = address((uint64_t)insn_addr + offset);
433 return 1;
434 }
435 virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
436 assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
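    // Same immhi:immlo split as adr, but the offset is in units of 4K pages.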
437 intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
438 offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
439 int shift = 12;
440 offset <<= shift;
441 uint64_t target_page = ((uint64_t)insn_addr) + offset;
442 target_page &= ((uint64_t)-1) << shift;
443 uint32_t insn2 = insn_at(1);
444 target = address(target_page);
445 precond(inner != nullptr);
446 (*inner)(insn_addr, target);
447 return 2;
448 }
449 static int adrpMem_impl(address insn_addr, address &target) {
450 uint32_t insn2 = insn_at(insn_addr, 1);
451 // Load/store register (unsigned immediate)
452 ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
453 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
454 byte_offset <<= size;
455 target += byte_offset;
456 return 2;
457 }
458 static int adrpAdd_impl(address insn_addr, address &target) {
459 uint32_t insn2 = insn_at(insn_addr, 1);
460 // add (immediate)
461 ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
462 target += byte_offset;
463 return 2;
464 }
465 static int adrpMovk_impl(address insn_addr, address &target) {
466 uint32_t insn2 = insn_at(insn_addr, 1);
467 uint64_t dest = uint64_t(target);
468 dest = (dest & 0xffff0000ffffffff) |
469 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
470 target = address(dest);
471
472 // We know the destination 4k page. Maybe we have a third
473 // instruction.
474 uint32_t insn = insn_at(insn_addr, 0);
475 uint32_t insn3 = insn_at(insn_addr, 2);
476 ptrdiff_t byte_offset;
477 if (offset_for(insn, insn3, byte_offset)) {
478 target += byte_offset;
479 return 3;
480 } else {
481 return 2;
482 }
483 }
484 virtual int immediate(address insn_addr, address &target) {
485 uint32_t *insns = (uint32_t *)insn_addr;
486 assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
487 // Move wide constant: movz, movk, movk. See movptr().
488 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
489 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
490 target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
491 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
492 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
493 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
494 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
495 return 3;
496 }
497 virtual void verify(address insn_addr, address &target) {
498 }
499 };
500
501 address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
502 AArch64Decoder decoder(insn_addr, insn);
503 address target;
504 decoder.run(insn_addr, target);
505 return target;
506 }
507
508 // Patch any kind of instruction; there may be several instructions.
509 // Return the total length (in bytes) of the instructions.
510 int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
511 Patcher patcher(insn_addr);
512 return patcher.run(insn_addr, target);
513 }
514
515 int MacroAssembler::patch_oop(address insn_addr, address o) {
516 int instructions;
517 unsigned insn = *(unsigned*)insn_addr;
518 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
519
520 // OOPs are either narrow (32 bits) or wide (48 bits). We encode
521 // narrow OOPs by setting the upper 16 bits in the first
522 // instruction.
523 if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
524 // Move narrow OOP
525 uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
526 Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
527 Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
528 instructions = 2;
529 } else {
530 // Move wide OOP
531 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
532 uintptr_t dest = (uintptr_t)o;
533 Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
534 Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
535 Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
536 instructions = 3;
537 }
538 return instructions * NativeInstruction::instruction_size;
539 }
540
541 int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
542 // Metadata pointers are either narrow (32 bits) or wide (48 bits).
543 // We encode narrow ones by setting the upper 16 bits in the first
544 // instruction.
545 NativeInstruction *insn = nativeInstruction_at(insn_addr);
546 assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
547 nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
548
549 Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
550 Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
551 return 2 * NativeInstruction::instruction_size;
552 }
553
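// An ldrw to zr (as emitted for a polling-page read) encodes no relocatable
// target, so return null for it instead of decoding one.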
554 address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
555 if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
556 return nullptr;
557 }
558 return MacroAssembler::target_addr_for_insn(insn_addr, insn);
559 }
560
561 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
562 ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
563 if (at_return) {
564 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
565 // we may safely use the sp instead to perform the stack watermark check.
566 cmp(in_nmethod ? sp : rfp, tmp);
567 br(Assembler::HI, slow_path);
568 } else {
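    // Not at a return: just test the poll bit in the per-thread polling word.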
569 tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
570 }
571 }
572
573 void MacroAssembler::rt_call(address dest, Register tmp) {
574 CodeBlob *cb = CodeCache::find_blob(dest);
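  // Destinations inside the code cache are reached with a (possibly far)
  // call; anything else is reached indirectly through tmp.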
575 if (cb) {
576 far_call(RuntimeAddress(dest));
577 } else {
578 lea(tmp, RuntimeAddress(dest));
579 blr(tmp);
580 }
581 }
582
583 void MacroAssembler::push_cont_fastpath(Register java_thread) {
584 if (!Continuations::enabled()) return;
585 Label done;
586 ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
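  // Only update cont_fastpath if the current sp is above the recorded value.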
587 cmp(sp, rscratch1);
588 br(Assembler::LS, done);
589 mov(rscratch1, sp); // we can't use sp as the source in str
590 str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
591 bind(done);
592 }
593
594 void MacroAssembler::pop_cont_fastpath(Register java_thread) {
595 if (!Continuations::enabled()) return;
596 Label done;
597 ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
598 cmp(sp, rscratch1);
599 br(Assembler::LO, done);
600 str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
601 bind(done);
602 }
603
604 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
605 // we must set sp to zero to clear frame
606 str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
607
608 // must clear fp, so that compiled frames are not confused; it is
609 // possible that we need it only for debugging
610 if (clear_fp) {
611 str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
612 }
613
614 // Always clear the pc because it could have been set by make_walkable()
615 str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
616 }
617
618 // Calls to C land
619 //
// When entering C land, the rfp and sp of the last Java frame have to be recorded
621 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
622 // has to be reset to 0. This is required to allow proper stack traversal.
623 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
624 Register last_java_fp,
625 Register last_java_pc,
626 Register scratch) {
627
628 if (last_java_pc->is_valid()) {
629 str(last_java_pc, Address(rthread,
630 JavaThread::frame_anchor_offset()
631 + JavaFrameAnchor::last_Java_pc_offset()));
632 }
633
634 // determine last_java_sp register
635 if (last_java_sp == sp) {
636 mov(scratch, sp);
637 last_java_sp = scratch;
638 } else if (!last_java_sp->is_valid()) {
639 last_java_sp = esp;
640 }
641
642 str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));
643
644 // last_java_fp is optional
645 if (last_java_fp->is_valid()) {
646 str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
647 }
648 }
649
650 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
651 Register last_java_fp,
652 address last_java_pc,
653 Register scratch) {
654 assert(last_java_pc != nullptr, "must provide a valid PC");
655
656 adr(scratch, last_java_pc);
657 str(scratch, Address(rthread,
658 JavaThread::frame_anchor_offset()
659 + JavaFrameAnchor::last_Java_pc_offset()));
660
661 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
662 }
663
664 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
665 Register last_java_fp,
666 Label &L,
667 Register scratch) {
668 if (L.is_bound()) {
669 set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
670 } else {
671 InstructionMark im(this);
672 L.add_patch_at(code(), locator());
673 set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
674 }
675 }
676
677 static inline bool target_needs_far_branch(address addr) {
678 if (AOTCodeCache::is_on_for_dump()) {
679 return true;
680 }
681 // codecache size <= 128M
682 if (!MacroAssembler::far_branches()) {
683 return false;
684 }
685 // codecache size > 240M
686 if (MacroAssembler::codestub_branch_needs_far_jump()) {
687 return true;
688 }
689 // codecache size: 128M..240M
690 return !CodeCache::is_non_nmethod(addr);
691 }
692
693 void MacroAssembler::far_call(Address entry, Register tmp) {
694 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
695 assert(CodeCache::find_blob(entry.target()) != nullptr,
696 "destination of far call not found in code cache");
697 assert(entry.rspec().type() == relocInfo::external_word_type
698 || entry.rspec().type() == relocInfo::runtime_call_type
699 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
700 if (target_needs_far_branch(entry.target())) {
701 uint64_t offset;
702 // We can use ADRP here because we know that the total size of
703 // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
704 adrp(tmp, entry, offset);
705 add(tmp, tmp, offset);
706 blr(tmp);
707 } else {
708 bl(entry);
709 }
710 }
711
712 int MacroAssembler::far_jump(Address entry, Register tmp) {
713 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
714 assert(CodeCache::find_blob(entry.target()) != nullptr,
715 "destination of far call not found in code cache");
716 assert(entry.rspec().type() == relocInfo::external_word_type
717 || entry.rspec().type() == relocInfo::runtime_call_type
718 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
719 address start = pc();
720 if (target_needs_far_branch(entry.target())) {
721 uint64_t offset;
722 // We can use ADRP here because we know that the total size of
723 // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
724 adrp(tmp, entry, offset);
725 add(tmp, tmp, offset);
726 br(tmp);
727 } else {
728 b(entry);
729 }
730 return pc() - start;
731 }
732
733 void MacroAssembler::reserved_stack_check() {
734 // testing if reserved zone needs to be enabled
735 Label no_reserved_zone_enabling;
736
737 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
738 cmp(sp, rscratch1);
739 br(Assembler::LO, no_reserved_zone_enabling);
740
741 enter(); // LR and FP are live.
742 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
743 mov(c_rarg0, rthread);
744 blr(rscratch1);
745 leave();
746
747 // We have already removed our own frame.
748 // throw_delayed_StackOverflowError will think that it's been
749 // called by our caller.
750 lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
751 br(rscratch1);
752 should_not_reach_here();
753
754 bind(no_reserved_zone_enabling);
755 }
756
757 static void pass_arg0(MacroAssembler* masm, Register arg) {
758 if (c_rarg0 != arg ) {
759 masm->mov(c_rarg0, arg);
760 }
761 }
762
763 static void pass_arg1(MacroAssembler* masm, Register arg) {
764 if (c_rarg1 != arg ) {
765 masm->mov(c_rarg1, arg);
766 }
767 }
768
769 static void pass_arg2(MacroAssembler* masm, Register arg) {
770 if (c_rarg2 != arg ) {
771 masm->mov(c_rarg2, arg);
772 }
773 }
774
775 static void pass_arg3(MacroAssembler* masm, Register arg) {
776 if (c_rarg3 != arg ) {
777 masm->mov(c_rarg3, arg);
778 }
779 }
780
781 static bool is_preemptable(address entry_point) {
782 return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
783 }
784
785 void MacroAssembler::call_VM_base(Register oop_result,
786 Register java_thread,
787 Register last_java_sp,
788 address entry_point,
789 int number_of_arguments,
790 bool check_exceptions) {
791 // determine java_thread register
792 if (!java_thread->is_valid()) {
793 java_thread = rthread;
794 }
795
796 // determine last_java_sp register
797 if (!last_java_sp->is_valid()) {
798 last_java_sp = esp;
799 }
800
801 // debugging support
802 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
803 assert(java_thread == rthread, "unexpected register");
804 #ifdef ASSERT
805 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
806 // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
807 #endif // ASSERT
808
809 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
810 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
811
812 // push java thread (becomes first argument of C function)
813
814 mov(c_rarg0, java_thread);
815
816 // set last Java frame before call
817 assert(last_java_sp != rfp, "can't use rfp");
818
819 Label l;
820 if (is_preemptable(entry_point)) {
821 // skip setting last_pc since we already set it to desired value.
822 set_last_Java_frame(last_java_sp, rfp, noreg, rscratch1);
823 } else {
824 set_last_Java_frame(last_java_sp, rfp, l, rscratch1);
825 }
826
827 // do the call, remove parameters
828 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);
829
  // lr could be poisoned with a PAC signature during throw_pending_exception
  // if the call was tail-call optimized by the compiler; since lr is not
  // callee-saved, reload it with the proper value.
833 adr(lr, l);
834
835 // reset last Java frame
836 // Only interpreter should have to clear fp
837 reset_last_Java_frame(true);
838
839 // C++ interp handles this in the interpreter
840 check_and_handle_popframe(java_thread);
841 check_and_handle_earlyret(java_thread);
842
843 if (check_exceptions) {
844 // check for pending exceptions (java_thread is set upon return)
845 ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
846 Label ok;
847 cbz(rscratch1, ok);
848 lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
849 br(rscratch1);
850 bind(ok);
851 }
852
853 // get oop result if there is one and reset the value in the thread
854 if (oop_result->is_valid()) {
855 get_vm_result_oop(oop_result, java_thread);
856 }
857 }
858
859 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
860 call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
861 }
862
// Check whether the entry target is always reachable from any branch.
864 static bool is_always_within_branch_range(Address entry) {
865 if (AOTCodeCache::is_on_for_dump()) {
866 return false;
867 }
868 const address target = entry.target();
869
870 if (!CodeCache::contains(target)) {
871 // We always use trampolines for callees outside CodeCache.
872 assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
873 return false;
874 }
875
876 if (!MacroAssembler::far_branches()) {
877 return true;
878 }
879
880 if (entry.rspec().type() == relocInfo::runtime_call_type) {
881 // Runtime calls are calls of a non-compiled method (stubs, adapters).
882 // Non-compiled methods stay forever in CodeCache.
883 // We check whether the longest possible branch is within the branch range.
884 assert(CodeCache::find_blob(target) != nullptr &&
885 !CodeCache::find_blob(target)->is_nmethod(),
886 "runtime call of compiled method");
887 const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
888 const address left_longest_branch_start = CodeCache::low_bound();
889 const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
890 Assembler::reachable_from_branch_at(right_longest_branch_start, target);
891 return is_reachable;
892 }
893
894 return false;
895 }
896
897 // Maybe emit a call via a trampoline. If the code cache is small
898 // trampolines won't be emitted.
899 address MacroAssembler::trampoline_call(Address entry) {
900 assert(entry.rspec().type() == relocInfo::runtime_call_type
901 || entry.rspec().type() == relocInfo::opt_virtual_call_type
902 || entry.rspec().type() == relocInfo::static_call_type
903 || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");
904
905 address target = entry.target();
906
907 if (!is_always_within_branch_range(entry)) {
908 if (!in_scratch_emit_size()) {
909 // We don't want to emit a trampoline if C2 is generating dummy
910 // code during its branch shortening phase.
911 if (entry.rspec().type() == relocInfo::runtime_call_type) {
912 assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
913 code()->share_trampoline_for(entry.target(), offset());
914 } else {
915 address stub = emit_trampoline_stub(offset(), target);
916 if (stub == nullptr) {
917 postcond(pc() == badAddress);
918 return nullptr; // CodeCache is full
919 }
920 }
921 }
922 target = pc();
923 }
924
925 address call_pc = pc();
926 relocate(entry.rspec());
927 bl(target);
928
929 postcond(pc() != badAddress);
930 return call_pc;
931 }
932
933 // Emit a trampoline stub for a call to a target which is too far away.
934 //
935 // code sequences:
936 //
937 // call-site:
938 // branch-and-link to <destination> or <trampoline stub>
939 //
940 // Related trampoline stub for this call site in the stub section:
941 // load the call target from the constant pool
942 // branch (LR still points to the call site above)
943
944 address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
945 address dest) {
946 // Max stub size: alignment nop, TrampolineStub.
947 address stub = start_a_stub(max_trampoline_stub_size());
948 if (stub == nullptr) {
949 return nullptr; // CodeBuffer::expand failed
950 }
951
952 // Create a trampoline stub relocation which relates this trampoline stub
953 // with the call instruction at insts_call_instruction_offset in the
954 // instructions code-section.
955 align(wordSize);
956 relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
957 + insts_call_instruction_offset));
958 const int stub_start_offset = offset();
959
960 // Now, create the trampoline stub's code:
961 // - load the call
962 // - call
963 Label target;
964 ldr(rscratch1, target);
965 br(rscratch1);
966 bind(target);
967 assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
968 "should be");
969 emit_int64((int64_t)dest);
970
971 const address stub_start_addr = addr_at(stub_start_offset);
972
973 assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
974
975 end_a_stub();
976 return stub_start_addr;
977 }
978
979 int MacroAssembler::max_trampoline_stub_size() {
980 // Max stub size: alignment nop, TrampolineStub.
981 return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
982 }
983
984 void MacroAssembler::emit_static_call_stub() {
985 // CompiledDirectCall::set_to_interpreted knows the
986 // exact layout of this stub.
987
988 isb();
989 mov_metadata(rmethod, nullptr);
990
991 // Jump to the entry point of the c2i stub.
992 if (codestub_branch_needs_far_jump()) {
993 movptr(rscratch1, 0);
994 br(rscratch1);
995 } else {
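    // Near branch to self as a placeholder; the target is patched in later
    // when the call site is resolved.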
996 b(pc());
997 }
998 }
999
1000 int MacroAssembler::static_call_stub_size() {
1001 if (!codestub_branch_needs_far_jump()) {
    // isb; movz; movk; movk; b
1003 return 5 * NativeInstruction::instruction_size;
1004 }
  // isb; movz; movk; movk; movz; movk; movk; br
1006 return 8 * NativeInstruction::instruction_size;
1007 }
1008
1009 void MacroAssembler::c2bool(Register x) {
1010 // implements x == 0 ? 0 : 1
1011 // note: must only look at least-significant byte of x
1012 // since C-style booleans are stored in one byte
1013 // only! (was bug)
1014 tst(x, 0xff);
1015 cset(x, Assembler::NE);
1016 }
1017
1018 address MacroAssembler::ic_call(address entry, jint method_index) {
1019 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
1020 movptr(rscratch2, (intptr_t)Universe::non_oop_word());
1021 return trampoline_call(Address(entry, rh));
1022 }
1023
1024 int MacroAssembler::ic_check_size() {
1025 int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
1026 if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
1027 return NativeInstruction::instruction_size * (7 + extra_instructions);
1028 } else {
1029 return NativeInstruction::instruction_size * (5 + extra_instructions);
1030 }
1031 }
1032
1033 int MacroAssembler::ic_check(int end_alignment) {
1034 Register receiver = j_rarg0;
1035 Register data = rscratch2;
1036 Register tmp1 = rscratch1;
1037 Register tmp2 = r10;
1038
1039 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
1040 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
1041 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
1042 // before the inline cache check here, and not after
1043 align(end_alignment, offset() + ic_check_size());
1044
1045 int uep_offset = offset();
1046
1047 if (UseCompactObjectHeaders) {
1048 load_narrow_klass_compact(tmp1, receiver);
1049 ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
1050 cmpw(tmp1, tmp2);
1051 } else if (UseCompressedClassPointers) {
1052 ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
1053 ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
1054 cmpw(tmp1, tmp2);
1055 } else {
1056 ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
1057 ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
1058 cmp(tmp1, tmp2);
1059 }
1060
1061 Label dont;
1062 br(Assembler::EQ, dont);
1063 far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1064 bind(dont);
1065 assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
1066
1067 return uep_offset;
1068 }
1069
1070 // Implementation of call_VM versions
1071
1072 void MacroAssembler::call_VM(Register oop_result,
1073 address entry_point,
1074 bool check_exceptions) {
1075 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1076 }
1077
1078 void MacroAssembler::call_VM(Register oop_result,
1079 address entry_point,
1080 Register arg_1,
1081 bool check_exceptions) {
1082 pass_arg1(this, arg_1);
1083 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1084 }
1085
1086 void MacroAssembler::call_VM(Register oop_result,
1087 address entry_point,
1088 Register arg_1,
1089 Register arg_2,
1090 bool check_exceptions) {
1091 assert_different_registers(arg_1, c_rarg2);
1092 pass_arg2(this, arg_2);
1093 pass_arg1(this, arg_1);
1094 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1095 }
1096
1097 void MacroAssembler::call_VM(Register oop_result,
1098 address entry_point,
1099 Register arg_1,
1100 Register arg_2,
1101 Register arg_3,
1102 bool check_exceptions) {
1103 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1104 assert_different_registers(arg_2, c_rarg3);
1105 pass_arg3(this, arg_3);
1106
1107 pass_arg2(this, arg_2);
1108
1109 pass_arg1(this, arg_1);
1110 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1111 }
1112
1113 void MacroAssembler::call_VM(Register oop_result,
1114 Register last_java_sp,
1115 address entry_point,
1116 int number_of_arguments,
1117 bool check_exceptions) {
1118 call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1119 }
1120
1121 void MacroAssembler::call_VM(Register oop_result,
1122 Register last_java_sp,
1123 address entry_point,
1124 Register arg_1,
1125 bool check_exceptions) {
1126 pass_arg1(this, arg_1);
1127 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1128 }
1129
1130 void MacroAssembler::call_VM(Register oop_result,
1131 Register last_java_sp,
1132 address entry_point,
1133 Register arg_1,
1134 Register arg_2,
1135 bool check_exceptions) {
1136
1137 assert_different_registers(arg_1, c_rarg2);
1138 pass_arg2(this, arg_2);
1139 pass_arg1(this, arg_1);
1140 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1141 }
1142
1143 void MacroAssembler::call_VM(Register oop_result,
1144 Register last_java_sp,
1145 address entry_point,
1146 Register arg_1,
1147 Register arg_2,
1148 Register arg_3,
1149 bool check_exceptions) {
1150 assert_different_registers(arg_1, c_rarg2, c_rarg3);
1151 assert_different_registers(arg_2, c_rarg3);
1152 pass_arg3(this, arg_3);
1153 pass_arg2(this, arg_2);
1154 pass_arg1(this, arg_1);
1155 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1156 }
1157
1158
1159 void MacroAssembler::get_vm_result_oop(Register oop_result, Register java_thread) {
1160 ldr(oop_result, Address(java_thread, JavaThread::vm_result_oop_offset()));
1161 str(zr, Address(java_thread, JavaThread::vm_result_oop_offset()));
1162 verify_oop_msg(oop_result, "broken oop in call_VM_base");
1163 }
1164
1165 void MacroAssembler::get_vm_result_metadata(Register metadata_result, Register java_thread) {
1166 ldr(metadata_result, Address(java_thread, JavaThread::vm_result_metadata_offset()));
1167 str(zr, Address(java_thread, JavaThread::vm_result_metadata_offset()));
1168 }
1169
1170 void MacroAssembler::align(int modulus) {
1171 align(modulus, offset());
1172 }
1173
// Pad with nops so that the code (target - offset()) bytes ahead of the
// current position ends up aligned according to modulus.
1176 void MacroAssembler::align(int modulus, int target) {
1177 int delta = target - offset();
1178 while ((offset() + delta) % modulus != 0) nop();
1179 }
1180
1181 void MacroAssembler::post_call_nop() {
1182 if (!Continuations::enabled()) {
1183 return;
1184 }
1185 InstructionMark im(this);
1186 relocate(post_call_nop_Relocation::spec());
1187 InlineSkippedInstructionsCounter skipCounter(this);
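  // A nop followed by two movk's to zr: architecturally no-ops that reserve
  // patchable immediate fields.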
1188 nop();
1189 movk(zr, 0);
1190 movk(zr, 0);
1191 }
1192
1193 // these are no-ops overridden by InterpreterMacroAssembler
1194
1195 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
1196
1197 void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
1198
1199 // Look up the method for a megamorphic invokeinterface call.
1200 // The target method is determined by <intf_klass, itable_index>.
1201 // The receiver klass is in recv_klass.
1202 // On success, the result will be in method_result, and execution falls through.
1203 // On failure, execution transfers to the given label.
1204 void MacroAssembler::lookup_interface_method(Register recv_klass,
1205 Register intf_klass,
1206 RegisterOrConstant itable_index,
1207 Register method_result,
1208 Register scan_temp,
1209 Label& L_no_such_interface,
1210 bool return_method) {
1211 assert_different_registers(recv_klass, intf_klass, scan_temp);
1212 assert_different_registers(method_result, intf_klass, scan_temp);
1213 assert(recv_klass != method_result || !return_method,
1214 "recv_klass can be destroyed when method isn't needed");
1215 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
1216 "caller must use same register for non-constant itable index as for method");
1217
1218 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
1219 int vtable_base = in_bytes(Klass::vtable_start_offset());
1220 int itentry_off = in_bytes(itableMethodEntry::method_offset());
1221 int scan_step = itableOffsetEntry::size() * wordSize;
1222 int vte_size = vtableEntry::size_in_bytes();
1223 assert(vte_size == wordSize, "else adjust times_vte_scale");
1224
1225 ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
1226
1227 // Could store the aligned, prescaled offset in the klass.
1228 // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
1229 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
1230 add(scan_temp, scan_temp, vtable_base);
1231
1232 if (return_method) {
1233 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
1234 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
1235 // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
1236 lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
1237 if (itentry_off)
1238 add(recv_klass, recv_klass, itentry_off);
1239 }
1240
1241 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
1242 // if (scan->interface() == intf) {
1243 // result = (klass + scan->offset() + itable_index);
1244 // }
1245 // }
1246 Label search, found_method;
1247
1248 ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
1249 cmp(intf_klass, method_result);
1250 br(Assembler::EQ, found_method);
1251 bind(search);
1252 // Check that the previous entry is non-null. A null entry means that
1253 // the receiver class doesn't implement the interface, and wasn't the
1254 // same as when the caller was compiled.
1255 cbz(method_result, L_no_such_interface);
1256 if (itableOffsetEntry::interface_offset() != 0) {
1257 add(scan_temp, scan_temp, scan_step);
1258 ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
1259 } else {
1260 ldr(method_result, Address(pre(scan_temp, scan_step)));
1261 }
1262 cmp(intf_klass, method_result);
1263 br(Assembler::NE, search);
1264
1265 bind(found_method);
1266
1267 // Got a hit.
1268 if (return_method) {
1269 ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
1270 ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
1271 }
1272 }
1273
1274 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
1275 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
1276 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
1277 // The target method is determined by <holder_klass, itable_index>.
1278 // The receiver klass is in recv_klass.
1279 // On success, the result will be in method_result, and execution falls through.
1280 // On failure, execution transfers to the given label.
1281 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
1282 Register holder_klass,
1283 Register resolved_klass,
1284 Register method_result,
1285 Register temp_itbl_klass,
1286 Register scan_temp,
1287 int itable_index,
1288 Label& L_no_such_interface) {
1289 // 'method_result' is only used as output register at the very end of this method.
1290 // Until then we can reuse it as 'holder_offset'.
1291 Register holder_offset = method_result;
1292 assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);
1293
1294 int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
1295 int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
1296 int ioffset = in_bytes(itableOffsetEntry::interface_offset());
1297 int ooffset = in_bytes(itableOffsetEntry::offset_offset());
1298
1299 Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;
1300
1301 ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
1302 add(recv_klass, recv_klass, vtable_start_offset + ioffset);
1303 // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
1304 // temp_itbl_klass = itable[0]._interface;
1305 int vtblEntrySize = vtableEntry::size_in_bytes();
1306 assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
1307 ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
1308 mov(holder_offset, zr);
1309 // scan_temp = &(itable[0]._interface)
1310 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
1311
1312 // Initial checks:
1313 // - if (holder_klass != resolved_klass), go to "scan for resolved"
1314 // - if (itable[0] == holder_klass), shortcut to "holder found"
1315 // - if (itable[0] == 0), no such interface
1316 cmp(resolved_klass, holder_klass);
1317 br(Assembler::NE, L_loop_search_resolved_entry);
1318 cmp(holder_klass, temp_itbl_klass);
1319 br(Assembler::EQ, L_holder_found);
1320 cbz(temp_itbl_klass, L_no_such_interface);
1321
1322 // Loop: Look for holder_klass record in itable
1323 // do {
1324 // temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
1325 // if (temp_itbl_klass == holder_klass) {
1326 // goto L_holder_found; // Found!
1327 // }
1328 // } while (temp_itbl_klass != 0);
1329 // goto L_no_such_interface // Not found.
1330 Label L_search_holder;
1331 bind(L_search_holder);
1332 ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
1333 cmp(holder_klass, temp_itbl_klass);
1334 br(Assembler::EQ, L_holder_found);
1335 cbnz(temp_itbl_klass, L_search_holder);
1336
1337 b(L_no_such_interface);
1338
1339 // Loop: Look for resolved_class record in itable
1340 // while (true) {
1341 // temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
1342 // if (temp_itbl_klass == 0) {
1343 // goto L_no_such_interface;
1344 // }
1345 // if (temp_itbl_klass == resolved_klass) {
1346 // goto L_resolved_found; // Found!
1347 // }
1348 // if (temp_itbl_klass == holder_klass) {
1349 // holder_offset = scan_temp;
1350 // }
1351 // }
1352 //
1353 Label L_loop_search_resolved;
1354 bind(L_loop_search_resolved);
1355 ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
1356 bind(L_loop_search_resolved_entry);
1357 cbz(temp_itbl_klass, L_no_such_interface);
1358 cmp(resolved_klass, temp_itbl_klass);
1359 br(Assembler::EQ, L_resolved_found);
1360 cmp(holder_klass, temp_itbl_klass);
1361 br(Assembler::NE, L_loop_search_resolved);
1362 mov(holder_offset, scan_temp);
1363 b(L_loop_search_resolved);
1364
1365 // See if we already have a holder klass. If not, go and scan for it.
1366 bind(L_resolved_found);
1367 cbz(holder_offset, L_search_holder);
1368 mov(scan_temp, holder_offset);
1369
1370 // Finally, scan_temp contains holder_klass vtable offset
1371 bind(L_holder_found);
1372 ldrw(method_result, Address(scan_temp, ooffset - ioffset));
1373 add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
                              - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
1375 ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
1376 }
1377
1378 // virtual method calling
1379 void MacroAssembler::lookup_virtual_method(Register recv_klass,
1380 RegisterOrConstant vtable_index,
1381 Register method_result) {
1382 assert(vtableEntry::size() * wordSize == 8,
1383 "adjust the scaling in the code below");
1384 int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());
1385
1386 if (vtable_index.is_register()) {
1387 lea(method_result, Address(recv_klass,
1388 vtable_index.as_register(),
1389 Address::lsl(LogBytesPerWord)));
1390 ldr(method_result, Address(method_result, vtable_offset_in_bytes));
1391 } else {
1392 vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
1393 ldr(method_result,
1394 form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
1395 }
1396 }
1397
1398 void MacroAssembler::check_klass_subtype(Register sub_klass,
1399 Register super_klass,
1400 Register temp_reg,
1401 Label& L_success) {
1402 Label L_failure;
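  // On success control branches to L_success; on failure it falls through
  // past L_failure, which is bound at the end.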
1403 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
1404 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
1405 bind(L_failure);
1406 }
1407
1408
1409 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
1410 Register super_klass,
1411 Register temp_reg,
1412 Label* L_success,
1413 Label* L_failure,
1414 Label* L_slow_path,
1415 Register super_check_offset) {
1416 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
1417 bool must_load_sco = ! super_check_offset->is_valid();
1418 if (must_load_sco) {
1419 assert(temp_reg != noreg, "supply either a temp or a register offset");
1420 }
1421
1422 Label L_fallthrough;
1423 int label_nulls = 0;
1424 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1425 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1426 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
1427 assert(label_nulls <= 1, "at most one null in the batch");
1428
1429 int sco_offset = in_bytes(Klass::super_check_offset_offset());
1430 Address super_check_offset_addr(super_klass, sco_offset);
1431
1432 // Hacked jmp, which may only be used just before L_fallthrough.
1433 #define final_jmp(label) \
1434 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
1435 else b(label) /*omit semi*/
1436
1437 // If the pointers are equal, we are done (e.g., String[] elements).
1438 // This self-check enables sharing of secondary supertype arrays among
1439 // non-primary types such as array-of-interface. Otherwise, each such
1440 // type would need its own customized SSA.
1441 // We move this check to the front of the fast path because many
1442 // type checks are in fact trivially successful in this manner,
1443 // so we get a nicely predicted branch right at the start of the check.
1444 // TODO 8370341 For a direct pointer comparison, we need the refined array klass pointer
1445 cmp(sub_klass, super_klass);
1446 br(Assembler::EQ, *L_success);
1447
1448 // Check the supertype display:
1449 if (must_load_sco) {
1450 ldrw(temp_reg, super_check_offset_addr);
1451 super_check_offset = temp_reg;
1452 }
1453
1454 Address super_check_addr(sub_klass, super_check_offset);
1455 ldr(rscratch1, super_check_addr);
1456 cmp(super_klass, rscratch1); // load displayed supertype
1457 br(Assembler::EQ, *L_success);
1458
1459 // This check has worked decisively for primary supers.
1460 // Secondary supers are sought in the super_cache ('super_cache_addr').
1461 // (Secondary supers are interfaces and very deeply nested subtypes.)
1462 // This works in the same check above because of a tricky aliasing
1463 // between the super_cache and the primary super display elements.
1464 // (The 'super_check_addr' can address either, as the case requires.)
1465 // Note that the cache is updated below if it does not help us find
1466 // what we need immediately.
1467 // So if it was a primary super, we can just fail immediately.
1468 // Otherwise, it's the slow path for us (no success at this point).
1469
1470 sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
1471 if (L_failure == &L_fallthrough) {
1472 cbz(rscratch1, *L_slow_path);
1473 } else {
1474 cbnz(rscratch1, *L_failure);
1475 final_jmp(*L_slow_path);
1476 }
1477
1478 bind(L_fallthrough);
1479
1480 #undef final_jmp
1481 }
1482
1483 // These two are taken from x86, but they look generally useful
1484
// Scans 'count' pointer-sized words at [addr] for an occurrence of
// 'value'; generic.
1487 void MacroAssembler::repne_scan(Register addr, Register value, Register count,
1488 Register scratch) {
1489 Label Lloop, Lexit;
1490 cbz(count, Lexit);
1491 bind(Lloop);
1492 ldr(scratch, post(addr, wordSize));
1493 cmp(value, scratch);
1494 br(EQ, Lexit);
1495 sub(count, count, 1);
1496 cbnz(count, Lloop);
1497 bind(Lexit);
1498 }
1499
// Scans 'count' 4-byte words at [addr] for an occurrence of 'value';
// generic.
1502 void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
1503 Register scratch) {
1504 Label Lloop, Lexit;
1505 cbz(count, Lexit);
1506 bind(Lloop);
1507 ldrw(scratch, post(addr, wordSize));
1508 cmpw(value, scratch);
1509 br(EQ, Lexit);
1510 sub(count, count, 1);
1511 cbnz(count, Lloop);
1512 bind(Lexit);
1513 }
1514
1515 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
1516 Register super_klass,
1517 Register temp_reg,
1518 Register temp2_reg,
1519 Label* L_success,
1520 Label* L_failure,
1521 bool set_cond_codes) {
1522 // NB! Callers may assume that, when temp2_reg is a valid register,
1523 // this code sets it to a nonzero value.
1524
1525 assert_different_registers(sub_klass, super_klass, temp_reg);
1526 if (temp2_reg != noreg)
1527 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1528 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1529
1530 Label L_fallthrough;
1531 int label_nulls = 0;
1532 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1533 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1534 assert(label_nulls <= 1, "at most one null in the batch");
1535
1536 // a couple of useful fields in sub_klass:
1537 int ss_offset = in_bytes(Klass::secondary_supers_offset());
1538 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1539 Address secondary_supers_addr(sub_klass, ss_offset);
1540 Address super_cache_addr( sub_klass, sc_offset);
1541
1542 BLOCK_COMMENT("check_klass_subtype_slow_path");
1543
1544 // Do a linear scan of the secondary super-klass chain.
1545 // This code is rarely used, so simplicity is a virtue here.
1546 // The repne_scan instruction uses fixed registers, which we must spill.
1547 // Don't worry too much about pre-existing connections with the input regs.
1548
1549 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1550 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1551
1552 RegSet pushed_registers;
1553 if (!IS_A_TEMP(r2)) pushed_registers += r2;
1554 if (!IS_A_TEMP(r5)) pushed_registers += r5;
1555
1556 if (super_klass != r0) {
1557 if (!IS_A_TEMP(r0)) pushed_registers += r0;
1558 }
1559
1560 push(pushed_registers, sp);
1561
1562 // Get super_klass value into r0 (even if it was in r5 or r2).
1563 if (super_klass != r0) {
1564 mov(r0, super_klass);
1565 }
1566
1567 #ifndef PRODUCT
1568 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1569 #endif //PRODUCT
1570
1571 // We will consult the secondary-super array.
1572 ldr(r5, secondary_supers_addr);
1573 // Load the array length.
1574 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1575 // Skip to start of data.
1576 add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1577
1578 cmp(sp, zr); // Clear Z flag; SP is never zero
1579 // Scan R2 words at [R5] for an occurrence of R0.
1580 // Set NZ/Z based on last compare.
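  // If the array is empty, repne_scan performs no compare and leaves the
  // NE flags set by the cmp above, so the br(NE, *L_failure) below takes
  // the failure path as required.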
1581 repne_scan(r5, r0, r2, rscratch1);
1582
1583 // Unspill the temp. registers:
1584 pop(pushed_registers, sp);
1585
1586 br(Assembler::NE, *L_failure);
1587
1588 // Success. Cache the super we found and proceed in triumph.
1589
1590 if (UseSecondarySupersCache) {
1591 str(super_klass, super_cache_addr);
1592 }
1593
1594 if (L_success != &L_fallthrough) {
1595 b(*L_success);
1596 }
1597
1598 #undef IS_A_TEMP
1599
1600 bind(L_fallthrough);
1601 }
1602
// If Register r is invalid, allocate a new register from
// available_regs and add it to regs_to_push.
1605 Register MacroAssembler::allocate_if_noreg(Register r,
1606 RegSetIterator<Register> &available_regs,
                                           RegSet &regs_to_push) {
1608 if (!r->is_valid()) {
1609 r = *available_regs++;
1610 regs_to_push += r;
1611 }
1612 return r;
1613 }
1614
// check_klass_subtype_slow_path_table() looks for super_klass in the
// hash table belonging to sub_klass, branching to L_success or
// L_failure as appropriate. This is essentially a shim which
// allocates registers as necessary then calls
// lookup_secondary_supers_table_var() to do the work. Any of the temp
// regs may be noreg, in which case this logic chooses some registers
// and pushes and pops them around the call.
1622 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1623 Register super_klass,
1624 Register temp_reg,
1625 Register temp2_reg,
1626 Register temp3_reg,
1627 Register result_reg,
1628 FloatRegister vtemp,
1629 Label* L_success,
1630 Label* L_failure,
1631 bool set_cond_codes) {
1632 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1633
1634 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1635
1636 Label L_fallthrough;
1637 int label_nulls = 0;
1638 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1639 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1640 assert(label_nulls <= 1, "at most one null in the batch");
1641
1642 BLOCK_COMMENT("check_klass_subtype_slow_path");
1643
1644 RegSetIterator<Register> available_regs
1645 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1646
1647 RegSet pushed_regs;
1648
1649 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1650 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1651 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1652 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1653
1654 push(pushed_regs, sp);
1655
1656 lookup_secondary_supers_table_var(sub_klass,
1657 super_klass,
1658 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1659 nullptr);
1660 cmp(result_reg, zr);
1661
1662 // Unspill the temp. registers:
1663 pop(pushed_regs, sp);
1664
1665 // NB! Callers may assume that, when set_cond_codes is true, this
1666 // code sets temp2_reg to a nonzero value.
1667 if (set_cond_codes) {
1668 mov(temp2_reg, 1);
1669 }
1670
1671 br(Assembler::NE, *L_failure);
1672
1673 if (L_success != &L_fallthrough) {
1674 b(*L_success);
1675 }
1676
1677 bind(L_fallthrough);
1678 }
1679
1680 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1681 Register super_klass,
1682 Register temp_reg,
1683 Register temp2_reg,
1684 Label* L_success,
1685 Label* L_failure,
1686 bool set_cond_codes) {
1687 if (UseSecondarySupersTable) {
1688 check_klass_subtype_slow_path_table
1689 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1690 /*vtemp*/fnoreg,
1691 L_success, L_failure, set_cond_codes);
1692 } else {
1693 check_klass_subtype_slow_path_linear
1694 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1695 }
1696 }
1697
1698
1699 // Ensure that the inline code and the stub are using the same registers.
1700 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
1701 do { \
1702 assert(r_super_klass == r0 && \
1703 r_array_base == r1 && \
1704 r_array_length == r2 && \
1705 (r_array_index == r3 || r_array_index == noreg) && \
1706 (r_sub_klass == r4 || r_sub_klass == noreg) && \
1707 (r_bitmap == rscratch2 || r_bitmap == noreg) && \
1708 (result == r5 || result == noreg), "registers must match aarch64.ad"); \
1709 } while(0)
1710
1711 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
1712 Register r_super_klass,
1713 Register temp1,
1714 Register temp2,
1715 Register temp3,
1716 FloatRegister vtemp,
1717 Register result,
1718 u1 super_klass_slot,
1719 bool stub_is_near) {
1720 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1721
1722 Label L_fallthrough;
1723
1724 BLOCK_COMMENT("lookup_secondary_supers_table {");
1725
1726 const Register
1727 r_array_base = temp1, // r1
1728 r_array_length = temp2, // r2
1729 r_array_index = temp3, // r3
1730 r_bitmap = rscratch2;
1731
1732 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1733
1734 u1 bit = super_klass_slot;
1735
1736 // Make sure that result is nonzero if the TBZ below misses.
1737 mov(result, 1);
1738
1739 // We're going to need the bitmap in a vector reg and in a core reg,
1740 // so load both now.
1741 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1742 if (bit != 0) {
1743 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1744 }
1745 // First check the bitmap to see if super_klass might be present. If
1746 // the bit is zero, we are certain that super_klass is not one of
1747 // the secondary supers.
1748 tbz(r_bitmap, bit, L_fallthrough);
1749
1750 // Get the first array index that can contain super_klass into r_array_index.
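  // The idea (a sketch of the intent): shift the bitmap left so that the
  // bits above 'bit' fall off the top, then popcount what remains. That
  // counts the set bits in positions [0, bit]; because the secondary-supers
  // array is packed in hash-bit order, this is one more than the entry's
  // index, which is exactly compensated by leaving r_array_base pointing at
  // the length word.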
1751 if (bit != 0) {
1752 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit);
1753 cnt(vtemp, T8B, vtemp);
1754 addv(vtemp, T8B, vtemp);
1755 fmovd(r_array_index, vtemp);
1756 } else {
1757 mov(r_array_index, (u1)1);
1758 }
1759 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1760
1761 // We will consult the secondary-super array.
1762 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1763
1764 // The value i in r_array_index is >= 1, so even though r_array_base
1765 // points to the length, we don't need to adjust it to point to the
1766 // data.
1767 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1768 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1769
1770 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1771 eor(result, result, r_super_klass);
1772 cbz(result, L_fallthrough); // Found a match
1773
1774 // Is there another entry to check? Consult the bitmap.
1775 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough);
1776
1777 // Linear probe.
1778 if (bit != 0) {
1779 ror(r_bitmap, r_bitmap, bit);
1780 }
1781
1782 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1783 // The next slot to be inspected, by the stub we're about to call,
1784 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1785 // have been checked.
1786 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1787 if (stub_is_near) {
1788 bl(stub);
1789 } else {
1790 address call = trampoline_call(stub);
1791 if (call == nullptr) {
1792 return false; // trampoline allocation failed
1793 }
1794 }
1795
1796 BLOCK_COMMENT("} lookup_secondary_supers_table");
1797
1798 bind(L_fallthrough);
1799
1800 if (VerifySecondarySupers) {
1801 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1802 temp1, temp2, result); // r1, r2, r5
1803 }
1804 return true;
1805 }
1806
1807 // At runtime, return 0 in result if r_super_klass is a superclass of
1808 // r_sub_klass, otherwise return nonzero. Use this version of
1809 // lookup_secondary_supers_table() if you don't know ahead of time
1810 // which superclass will be searched for. Used by interpreter and
1811 // runtime stubs. It is larger and has somewhat greater latency than
1812 // the version above, which takes a constant super_klass_slot.
1813 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
1814 Register r_super_klass,
1815 Register temp1,
1816 Register temp2,
1817 Register temp3,
1818 FloatRegister vtemp,
1819 Register result,
1820 Label *L_success) {
1821 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1822
1823 Label L_fallthrough;
1824
1825 BLOCK_COMMENT("lookup_secondary_supers_table {");
1826
1827 const Register
1828 r_array_index = temp3,
1829 slot = rscratch1,
1830 r_bitmap = rscratch2;
1831
1832 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
1833
1834 // Make sure that result is nonzero if the test below misses.
1835 mov(result, 1);
1836
1837 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1838
1839 // First check the bitmap to see if super_klass might be present. If
1840 // the bit is zero, we are certain that super_klass is not one of
1841 // the secondary supers.
1842
1843 // This next instruction is equivalent to:
1844 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1845 // sub(temp2, tmp_reg, slot);
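  // (For slot in [0, 63], XOR with 63 equals 63 - slot, so temp2 becomes the
  // left-shift amount that moves bit 'slot' of the bitmap up to bit 63,
  // which the tbz below then tests.)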
1846 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1847 lslv(temp2, r_bitmap, temp2);
1848 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough);
1849
1850 bool must_save_v0 = (vtemp == fnoreg);
1851 if (must_save_v0) {
1852 // temp1 and result are free, so use them to preserve vtemp
1853 vtemp = v0;
1854 mov(temp1, vtemp, D, 0);
1855 mov(result, vtemp, D, 1);
1856 }
1857
1858 // Get the first array index that can contain super_klass into r_array_index.
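  // Same popcount trick as in the constant-slot version above: temp2 already
  // holds the bitmap shifted so that only bits [0, slot] survive.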
1859 mov(vtemp, D, 0, temp2);
1860 cnt(vtemp, T8B, vtemp);
1861 addv(vtemp, T8B, vtemp);
1862 mov(r_array_index, vtemp, D, 0);
1863
1864 if (must_save_v0) {
1865 mov(vtemp, D, 0, temp1 );
1866 mov(vtemp, D, 1, result);
1867 }
1868
1869 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1870
1871 const Register
1872 r_array_base = temp1,
1873 r_array_length = temp2;
1874
1875 // The value i in r_array_index is >= 1, so even though r_array_base
1876 // points to the length, we don't need to adjust it to point to the
1877 // data.
1878 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1879 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1880
1881 // We will consult the secondary-super array.
1882 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1883
1884 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1885 eor(result, result, r_super_klass);
1886 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match
1887
1888 // Is there another entry to check? Consult the bitmap.
1889 rorv(r_bitmap, r_bitmap, slot);
1890 // rol(r_bitmap, r_bitmap, 1);
1891 tbz(r_bitmap, 1, L_fallthrough);
1892
1893 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1894 // The next slot to be inspected, by the logic we're about to call,
1895 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1896 // have been checked.
1897 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index,
1898 r_bitmap, r_array_length, result, /*is_stub*/false);
1899
1900 BLOCK_COMMENT("} lookup_secondary_supers_table");
1901
1902 bind(L_fallthrough);
1903
1904 if (VerifySecondarySupers) {
1905 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1906 temp1, temp2, result); // r1, r2, r5
1907 }
1908
1909 if (L_success) {
1910 cbz(result, *L_success);
1911 }
1912 }
1913
1914 // Called by code generated by check_klass_subtype_slow_path
1915 // above. This is called when there is a collision in the hashed
1916 // lookup in the secondary supers array.
1917 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1918 Register r_array_base,
1919 Register r_array_index,
1920 Register r_bitmap,
1921 Register temp1,
1922 Register result,
1923 bool is_stub) {
1924 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1925
1926 const Register
1927 r_array_length = temp1,
1928 r_sub_klass = noreg; // unused
1929
1930 if (is_stub) {
1931 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1932 }
1933
1934 Label L_fallthrough, L_huge;
1935
1936 // Load the array length.
1937 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1938 // And adjust the array base to point to the data.
1939 // NB! Effectively increments current slot index by 1.
1940 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1941 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1942
1943 // The bitmap is full to bursting.
1944 // Implicit invariant: BITMAP_FULL implies (length > 0)
1945 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1946 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2));
1947 br(GT, L_huge);
1948
1949 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1950 // current slot (at secondary_supers[r_array_index]) has not yet
1951 // been inspected, and r_array_index may be out of bounds if we
1952 // wrapped around the end of the array.
1953
1954 { // This is conventional linear probing, but instead of terminating
1955 // when a null entry is found in the table, we maintain a bitmap
1956 // in which a 0 indicates missing entries.
1957 // As long as the bitmap is not completely full,
1958 // array_length == popcount(bitmap). The array_length check above
1959 // guarantees there are 0s in the bitmap, so the loop eventually
1960 // terminates.
1961 Label L_loop;
1962 bind(L_loop);
1963
1964 // Check for wraparound.
1965 cmp(r_array_index, r_array_length);
1966 csel(r_array_index, zr, r_array_index, GE);
1967
1968 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1969 eor(result, rscratch1, r_super_klass);
1970 cbz(result, L_fallthrough);
1971
1972 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1973
1974 ror(r_bitmap, r_bitmap, 1);
1975 add(r_array_index, r_array_index, 1);
1976 b(L_loop);
1977 }
1978
1979 { // Degenerate case: more than 64 secondary supers.
1980 // FIXME: We could do something smarter here, maybe a vectorized
1981 // comparison or a binary search, but is that worth any added
1982 // complexity?
1983 bind(L_huge);
1984 cmp(sp, zr); // Clear Z flag; SP is never zero
1985 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1986 cset(result, NE); // result == 0 iff we got a match.
1987 }
1988
1989 bind(L_fallthrough);
1990 }
1991
1992 // Make sure that the hashed lookup and a linear scan agree.
1993 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1994 Register r_super_klass,
1995 Register temp1,
1996 Register temp2,
1997 Register result) {
1998 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1999
2000 const Register
2001 r_array_base = temp1,
2002 r_array_length = temp2,
2003 r_array_index = noreg, // unused
2004 r_bitmap = noreg; // unused
2005
2006 BLOCK_COMMENT("verify_secondary_supers_table {");
2007
2008 // We will consult the secondary-super array.
2009 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
2010
2011 // Load the array length.
2012 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
2013 // And adjust the array base to point to the data.
2014 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
2015
2016 cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan r_array_length words at [r_array_base] for an occurrence of r_super_klass.
2018 // Set NZ/Z based on last compare.
2019 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
2020 // rscratch1 == 0 iff we got a match.
2021 cset(rscratch1, NE);
2022
2023 Label passed;
2024 cmp(result, zr);
2025 cset(result, NE); // normalize result to 0/1 for comparison
2026
2027 cmp(rscratch1, result);
2028 br(EQ, passed);
2029 {
2030 mov(r0, r_super_klass); // r0 <- r0
2031 mov(r1, r_sub_klass); // r1 <- r4
2032 mov(r2, /*expected*/rscratch1); // r2 <- r8
2033 mov(r3, result); // r3 <- r5
2034 mov(r4, (address)("mismatch")); // r4 <- const
2035 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
2036 should_not_reach_here();
2037 }
2038 bind(passed);
2039
2040 BLOCK_COMMENT("} verify_secondary_supers_table");
2041 }
2042
2043 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
2044 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
2045 assert_different_registers(klass, rthread, scratch);
2046
2047 Label L_fallthrough, L_tmp;
2048 if (L_fast_path == nullptr) {
2049 L_fast_path = &L_fallthrough;
2050 } else if (L_slow_path == nullptr) {
2051 L_slow_path = &L_fallthrough;
2052 }
2053 // Fast path check: class is fully initialized
2054 lea(scratch, Address(klass, InstanceKlass::init_state_offset()));
2055 ldarb(scratch, scratch);
2056 cmp(scratch, InstanceKlass::fully_initialized);
2057 br(Assembler::EQ, *L_fast_path);
2058
2059 // Fast path check: current thread is initializer thread
2060 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
2061 cmp(rthread, scratch);
2062
2063 if (L_slow_path == &L_fallthrough) {
2064 br(Assembler::EQ, *L_fast_path);
2065 bind(*L_slow_path);
2066 } else if (L_fast_path == &L_fallthrough) {
2067 br(Assembler::NE, *L_slow_path);
2068 bind(*L_fast_path);
2069 } else {
2070 Unimplemented();
2071 }
2072 }
2073
2074 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
2075 if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (below) confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
2078 return;
2079 }
2080
2081 // Pass register number to verify_oop_subroutine
2082 const char* b = nullptr;
2083 {
2084 ResourceMark rm;
2085 stringStream ss;
2086 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
2087 b = code_string(ss.as_string());
2088 }
2089 BLOCK_COMMENT("verify_oop {");
2090
2091 strip_return_address(); // This might happen within a stack frame.
2092 protect_return_address();
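  // Save r0 and rscratch1 (about to be overwritten with the subroutine's
  // arguments), rscratch2 (used for the indirect call) and lr (clobbered
  // by blr).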
2093 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2094 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2095
2096 mov(r0, reg);
2097 movptr(rscratch1, (uintptr_t)(address)b);
2098
2099 // call indirectly to solve generation ordering problem
2100 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2101 ldr(rscratch2, Address(rscratch2));
2102 blr(rscratch2);
2103
2104 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2105 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2106 authenticate_return_address();
2107
2108 BLOCK_COMMENT("} verify_oop");
2109 }
2110
2111 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
2112 if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (below) confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
2115 return;
2116 }
2117
2118 const char* b = nullptr;
2119 {
2120 ResourceMark rm;
2121 stringStream ss;
2122 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
2123 b = code_string(ss.as_string());
2124 }
2125 BLOCK_COMMENT("verify_oop_addr {");
2126
2127 strip_return_address(); // This might happen within a stack frame.
2128 protect_return_address();
2129 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2130 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2131
2132 // addr may contain sp so we will have to adjust it based on the
2133 // pushes that we just did.
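  // (The two stp pushes above move sp down by 4 words, hence the
  // 4 * wordSize re-adjustment when addr is sp-relative.)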
2134 if (addr.uses(sp)) {
2135 lea(r0, addr);
2136 ldr(r0, Address(r0, 4 * wordSize));
2137 } else {
2138 ldr(r0, addr);
2139 }
2140 movptr(rscratch1, (uintptr_t)(address)b);
2141
2142 // call indirectly to solve generation ordering problem
2143 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2144 ldr(rscratch2, Address(rscratch2));
2145 blr(rscratch2);
2146
2147 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2148 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2149 authenticate_return_address();
2150
2151 BLOCK_COMMENT("} verify_oop_addr");
2152 }
2153
2154 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
2155 int extra_slot_offset) {
2156 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
2157 int stackElementSize = Interpreter::stackElementSize;
2158 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
2159 #ifdef ASSERT
2160 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
2161 assert(offset1 - offset == stackElementSize, "correct arithmetic");
2162 #endif
2163 if (arg_slot.is_constant()) {
2164 return Address(esp, arg_slot.as_constant() * stackElementSize
2165 + offset);
2166 } else {
2167 add(rscratch1, esp, arg_slot.as_register(),
2168 ext::uxtx, exact_log2(stackElementSize));
2169 return Address(rscratch1, offset);
2170 }
2171 }
2172
2173 void MacroAssembler::call_VM_leaf_base(address entry_point,
2174 int number_of_arguments,
2175 Label *retaddr) {
2176 Label E, L;
2177
2178 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
2179
2180 mov(rscratch1, RuntimeAddress(entry_point));
2181 blr(rscratch1);
2182 if (retaddr)
2183 bind(*retaddr);
2184
2185 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
2186 }
2187
2188 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2189 call_VM_leaf_base(entry_point, number_of_arguments);
2190 }
2191
2192 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2193 pass_arg0(this, arg_0);
2194 call_VM_leaf_base(entry_point, 1);
2195 }
2196
2197 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2198 assert_different_registers(arg_1, c_rarg0);
2199 pass_arg0(this, arg_0);
2200 pass_arg1(this, arg_1);
2201 call_VM_leaf_base(entry_point, 2);
2202 }
2203
2204 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
2205 Register arg_1, Register arg_2) {
2206 assert_different_registers(arg_1, c_rarg0);
2207 assert_different_registers(arg_2, c_rarg0, c_rarg1);
2208 pass_arg0(this, arg_0);
2209 pass_arg1(this, arg_1);
2210 pass_arg2(this, arg_2);
2211 call_VM_leaf_base(entry_point, 3);
2212 }
2213
2214 void MacroAssembler::super_call_VM_leaf(address entry_point) {
2215 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2216 }
2217
2218 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2219 pass_arg0(this, arg_0);
2220 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2221 }
2222
2223 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2224
2225 assert_different_registers(arg_0, c_rarg1);
2226 pass_arg1(this, arg_1);
2227 pass_arg0(this, arg_0);
2228 MacroAssembler::call_VM_leaf_base(entry_point, 2);
2229 }
2230
2231 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2232 assert_different_registers(arg_0, c_rarg1, c_rarg2);
2233 assert_different_registers(arg_1, c_rarg2);
2234 pass_arg2(this, arg_2);
2235 pass_arg1(this, arg_1);
2236 pass_arg0(this, arg_0);
2237 MacroAssembler::call_VM_leaf_base(entry_point, 3);
2238 }
2239
2240 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2241 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
2242 assert_different_registers(arg_1, c_rarg2, c_rarg3);
2243 assert_different_registers(arg_2, c_rarg3);
2244 pass_arg3(this, arg_3);
2245 pass_arg2(this, arg_2);
2246 pass_arg1(this, arg_1);
2247 pass_arg0(this, arg_0);
2248 MacroAssembler::call_VM_leaf_base(entry_point, 4);
2249 }
2250
2251 void MacroAssembler::null_check(Register reg, int offset) {
2252 if (needs_explicit_null_check(offset)) {
2253 // provoke OS null exception if reg is null by
2254 // accessing M[reg] w/o changing any registers
2255 // NOTE: this is plenty to provoke a segv
2256 ldr(zr, Address(reg));
2257 } else {
2258 // nothing to do, (later) access of M[reg + offset]
2259 // will provoke OS null exception if reg is null
2260 }
2261 }
2262
2263 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2264 assert_different_registers(markword, rscratch2);
2265 mov(rscratch2, markWord::inline_type_mask_in_place);
2266 andr(markword, markword, rscratch2);
2267 mov(rscratch2, markWord::inline_type_pattern);
2268 cmp(markword, rscratch2);
2269 br(Assembler::EQ, is_inline_type);
2270 }
2271
2272 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
2273 assert_different_registers(tmp, rscratch1);
2274 if (can_be_null) {
2275 cbz(object, not_inline_type);
2276 }
2277 const int is_inline_type_mask = markWord::inline_type_pattern;
2278 ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2279 mov(rscratch1, is_inline_type_mask);
2280 andr(tmp, tmp, rscratch1);
2281 cmp(tmp, rscratch1);
2282 br(Assembler::NE, not_inline_type);
2283 }
2284
2285 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2286 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2287 tbnz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, is_null_free_inline_type);
2288 }
2289
2290 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2291 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2292 tbz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, not_null_free_inline_type);
2293 }
2294
2295 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2296 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2297 tbnz(flags, ResolvedFieldEntry::is_flat_shift, is_flat);
2298 }
2299
2300 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2301 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2302 tbnz(flags, ResolvedFieldEntry::has_null_marker_shift, has_null_marker);
2303 }
2304
2305 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2306 Label test_mark_word;
2307 // load mark word
2308 ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  // if the mark word is not unlocked it may be displaced (locked)
2310 tst(temp_reg, markWord::unlocked_value);
2311 br(Assembler::NE, test_mark_word);
  // slow path: use the klass prototype header instead
2313 load_prototype_header(temp_reg, oop);
2314
2315 bind(test_mark_word);
2316 andr(temp_reg, temp_reg, test_bit);
2317 if (jmp_set) {
2318 cbnz(temp_reg, jmp_label);
2319 } else {
2320 cbz(temp_reg, jmp_label);
2321 }
2322 }
2323
2324 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) {
2325 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2326 }
2327
2328 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
                                             Label& is_non_flat_array) {
2330 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2331 }
2332
2333 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
2334 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2335 }
2336
void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
2338 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2339 }
2340
2341 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2342 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2343 br(Assembler::NE, is_flat_array);
2344 }
2345
2346 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2347 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2348 br(Assembler::EQ, is_non_flat_array);
2349 }
2350
2351 // MacroAssembler protected routines needed to implement
2352 // public methods
2353
2354 void MacroAssembler::mov(Register r, Address dest) {
2355 code_section()->relocate(pc(), dest.rspec());
2356 uint64_t imm64 = (uint64_t)dest.target();
2357 movptr(r, imm64);
2358 }
2359
2360 // Move a constant pointer into r. In AArch64 mode the virtual
2361 // address space is 48 bits in size, so we only need three
2362 // instructions to create a patchable instruction sequence that can
2363 // reach anywhere.
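// For example, movptr(r, 0x123456789abc) emits
//   movz(r, 0x9abc); movk(r, 0x5678, 16); movk(r, 0x1234, 32);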
2364 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
2365 #ifndef PRODUCT
2366 {
2367 char buffer[64];
2368 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
2369 block_comment(buffer);
2370 }
2371 #endif
2372 assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
2373 movz(r, imm64 & 0xffff);
2374 imm64 >>= 16;
2375 movk(r, imm64 & 0xffff, 16);
2376 imm64 >>= 16;
2377 movk(r, imm64 & 0xffff, 32);
2378 }
2379
2380 // Macro to mov replicated immediate to vector register.
2381 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
2382 // the upper 56/48/32 bits must be zeros for B/H/S type.
2383 // Vd will get the following values for different arrangements in T
2384 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh
2385 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh
2386 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh
2387 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh
2388 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
2389 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
2390 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh
2391 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh
2392 // Clobbers rscratch1
2393 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
2394 assert(T != T1Q, "unsupported");
2395 if (T == T1D || T == T2D) {
2396 int imm = operand_valid_for_movi_immediate(imm64, T);
2397 if (-1 != imm) {
2398 movi(Vd, T, imm);
2399 } else {
2400 mov(rscratch1, imm64);
2401 dup(Vd, T, rscratch1);
2402 }
2403 return;
2404 }
2405
2406 #ifdef ASSERT
2407 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
2408 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
2409 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
2410 #endif
2411 int shift = operand_valid_for_movi_immediate(imm64, T);
2412 uint32_t imm32 = imm64 & 0xffffffffULL;
2413 if (shift >= 0) {
2414 movi(Vd, T, (imm32 >> shift) & 0xff, shift);
2415 } else {
2416 movw(rscratch1, imm32);
2417 dup(Vd, T, rscratch1);
2418 }
2419 }
2420
2421 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
2422 {
2423 #ifndef PRODUCT
2424 {
2425 char buffer[64];
2426 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
2427 block_comment(buffer);
2428 }
2429 #endif
2430 if (operand_valid_for_logical_immediate(false, imm64)) {
2431 orr(dst, zr, imm64);
2432 } else {
2433 // we can use a combination of MOVZ or MOVN with
2434 // MOVK to build up the constant
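    // For example (illustrative), imm64 == 0x0123'0000'0000'abcd has two
    // zero halfwords, so the code below emits
    //   movz(dst, 0xabcd, 0); movk(dst, 0x0123, 48);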
2435 uint64_t imm_h[4];
2436 int zero_count = 0;
2437 int neg_count = 0;
2438 int i;
2439 for (i = 0; i < 4; i++) {
2440 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
2441 if (imm_h[i] == 0) {
2442 zero_count++;
2443 } else if (imm_h[i] == 0xffffL) {
2444 neg_count++;
2445 }
2446 }
2447 if (zero_count == 4) {
2448 // one MOVZ will do
2449 movz(dst, 0);
2450 } else if (neg_count == 4) {
2451 // one MOVN will do
2452 movn(dst, 0);
2453 } else if (zero_count == 3) {
2454 for (i = 0; i < 4; i++) {
2455 if (imm_h[i] != 0L) {
2456 movz(dst, (uint32_t)imm_h[i], (i << 4));
2457 break;
2458 }
2459 }
2460 } else if (neg_count == 3) {
2461 // one MOVN will do
2462 for (int i = 0; i < 4; i++) {
2463 if (imm_h[i] != 0xffffL) {
2464 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2465 break;
2466 }
2467 }
2468 } else if (zero_count == 2) {
2469 // one MOVZ and one MOVK will do
2470 for (i = 0; i < 3; i++) {
2471 if (imm_h[i] != 0L) {
2472 movz(dst, (uint32_t)imm_h[i], (i << 4));
2473 i++;
2474 break;
2475 }
2476 }
2477 for (;i < 4; i++) {
2478 if (imm_h[i] != 0L) {
2479 movk(dst, (uint32_t)imm_h[i], (i << 4));
2480 }
2481 }
2482 } else if (neg_count == 2) {
2483 // one MOVN and one MOVK will do
2484 for (i = 0; i < 4; i++) {
2485 if (imm_h[i] != 0xffffL) {
2486 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2487 i++;
2488 break;
2489 }
2490 }
2491 for (;i < 4; i++) {
2492 if (imm_h[i] != 0xffffL) {
2493 movk(dst, (uint32_t)imm_h[i], (i << 4));
2494 }
2495 }
2496 } else if (zero_count == 1) {
2497 // one MOVZ and two MOVKs will do
2498 for (i = 0; i < 4; i++) {
2499 if (imm_h[i] != 0L) {
2500 movz(dst, (uint32_t)imm_h[i], (i << 4));
2501 i++;
2502 break;
2503 }
2504 }
2505 for (;i < 4; i++) {
2506 if (imm_h[i] != 0x0L) {
2507 movk(dst, (uint32_t)imm_h[i], (i << 4));
2508 }
2509 }
2510 } else if (neg_count == 1) {
2511 // one MOVN and two MOVKs will do
2512 for (i = 0; i < 4; i++) {
2513 if (imm_h[i] != 0xffffL) {
2514 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2515 i++;
2516 break;
2517 }
2518 }
2519 for (;i < 4; i++) {
2520 if (imm_h[i] != 0xffffL) {
2521 movk(dst, (uint32_t)imm_h[i], (i << 4));
2522 }
2523 }
2524 } else {
2525 // use a MOVZ and 3 MOVKs (makes it easier to debug)
2526 movz(dst, (uint32_t)imm_h[0], 0);
2527 for (i = 1; i < 4; i++) {
2528 movk(dst, (uint32_t)imm_h[i], (i << 4));
2529 }
2530 }
2531 }
2532 }
2533
2534 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
2535 {
2536 #ifndef PRODUCT
2537 {
2538 char buffer[64];
2539 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
2540 block_comment(buffer);
2541 }
2542 #endif
2543 if (operand_valid_for_logical_immediate(true, imm32)) {
2544 orrw(dst, zr, imm32);
2545 } else {
    // we can use a MOVZ or MOVN, possibly followed by one MOVK, to
    // build up the constant
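    // For example (illustrative), imm32 == 0x00120000 has a zero low
    // halfword, so a single movzw(dst, 0x0012, 16) suffices.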
2548 uint32_t imm_h[2];
2549 imm_h[0] = imm32 & 0xffff;
2550 imm_h[1] = ((imm32 >> 16) & 0xffff);
2551 if (imm_h[0] == 0) {
2552 movzw(dst, imm_h[1], 16);
2553 } else if (imm_h[0] == 0xffff) {
2554 movnw(dst, imm_h[1] ^ 0xffff, 16);
2555 } else if (imm_h[1] == 0) {
2556 movzw(dst, imm_h[0], 0);
2557 } else if (imm_h[1] == 0xffff) {
2558 movnw(dst, imm_h[0] ^ 0xffff, 0);
2559 } else {
2560 // use a MOVZ and MOVK (makes it easier to debug)
2561 movzw(dst, imm_h[0], 0);
2562 movkw(dst, imm_h[1], 16);
2563 }
2564 }
2565 }
2566
2567 // Form an address from base + offset in Rd. Rd may or may
2568 // not actually be used: you must use the Address that is returned.
2569 // It is up to you to ensure that the shift provided matches the size
2570 // of your data.
2571 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
2572 if (Address::offset_ok_for_immed(byte_offset, shift))
2573 // It fits; no need for any heroics
2574 return Address(base, byte_offset);
2575
2576 // Don't do anything clever with negative or misaligned offsets
2577 unsigned mask = (1 << shift) - 1;
2578 if (byte_offset < 0 || byte_offset & mask) {
2579 mov(Rd, byte_offset);
2580 add(Rd, base, Rd);
2581 return Address(Rd);
2582 }
2583
2584 // See if we can do this with two 12-bit offsets
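  // (Peel bits [12, 23] of the scaled word offset into an add-immediate on
  // the base, leaving a remainder small enough for the load/store
  // immediate field.)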
2585 {
2586 uint64_t word_offset = byte_offset >> shift;
2587 uint64_t masked_offset = word_offset & 0xfff000;
2588 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
2589 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
2590 add(Rd, base, masked_offset << shift);
2591 word_offset -= masked_offset;
2592 return Address(Rd, word_offset << shift);
2593 }
2594 }
2595
2596 // Do it the hard way
2597 mov(Rd, byte_offset);
2598 add(Rd, base, Rd);
2599 return Address(Rd);
2600 }
2601
2602 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
2603 bool want_remainder, Register scratch)
2604 {
2605 // Full implementation of Java idiv and irem. The function
2606 // returns the (pc) offset of the div instruction - may be needed
2607 // for implicit exceptions.
2608 //
2609 // constraint : ra/rb =/= scratch
2610 // normal case
2611 //
2612 // input : ra: dividend
2613 // rb: divisor
2614 //
2615 // result: either
2616 // quotient (= ra idiv rb)
2617 // remainder (= ra irem rb)
2618
2619 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2620
2621 int idivl_offset = offset();
2622 if (! want_remainder) {
2623 sdivw(result, ra, rb);
2624 } else {
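    // remainder = ra - (ra / rb) * rb; msubw below computes ra - scratch * rb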
2625 sdivw(scratch, ra, rb);
2626 Assembler::msubw(result, scratch, rb, ra);
2627 }
2628
2629 return idivl_offset;
2630 }
2631
2632 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2633 bool want_remainder, Register scratch)
2634 {
2635 // Full implementation of Java ldiv and lrem. The function
2636 // returns the (pc) offset of the div instruction - may be needed
2637 // for implicit exceptions.
2638 //
2639 // constraint : ra/rb =/= scratch
2640 // normal case
2641 //
2642 // input : ra: dividend
2643 // rb: divisor
2644 //
2645 // result: either
2646 // quotient (= ra idiv rb)
2647 // remainder (= ra irem rb)
2648
2649 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2650
2651 int idivq_offset = offset();
2652 if (! want_remainder) {
2653 sdiv(result, ra, rb);
2654 } else {
2655 sdiv(scratch, ra, rb);
2656 Assembler::msub(result, scratch, rb, ra);
2657 }
2658
2659 return idivq_offset;
2660 }
2661
2662 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2663 address prev = pc() - NativeMembar::instruction_size;
2664 address last = code()->last_insn();
2665 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2666 NativeMembar *bar = NativeMembar_at(prev);
2667 if (AlwaysMergeDMB) {
2668 bar->set_kind(bar->get_kind() | order_constraint);
2669 BLOCK_COMMENT("merged membar(always)");
2670 return;
2671 }
2672 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2673 // doing so would introduce a StoreLoad which the caller did not
2674 // intend
2675 if (bar->get_kind() == order_constraint
2676 || bar->get_kind() == AnyAny
2677 || order_constraint == AnyAny) {
2678 // We are merging two memory barrier instructions. On AArch64 we
2679 // can do this simply by ORing them together.
2680 bar->set_kind(bar->get_kind() | order_constraint);
2681 BLOCK_COMMENT("merged membar");
2682 return;
2683 } else {
      // A special case such as "DMB ST; DMB LD; DMB ST": the last DMB can be skipped.
      // We need to check the last two instructions.
2686 address prev2 = prev - NativeMembar::instruction_size;
2687 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2688 NativeMembar *bar2 = NativeMembar_at(prev2);
2689 assert(bar2->get_kind() == order_constraint, "it should be merged before");
2690 BLOCK_COMMENT("merged membar(elided)");
2691 return;
2692 }
2693 }
2694 }
2695 code()->set_last_insn(pc());
2696 dmb(Assembler::barrier(order_constraint));
2697 }
2698
2699 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2700 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2701 merge_ldst(rt, adr, size_in_bytes, is_store);
2702 code()->clear_last_insn();
2703 return true;
2704 } else {
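    // Merging is not possible here. Record this access (only if it is a
    // naturally aligned base+offset form) so that a following adjacent
    // access may be merged with it.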
    assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte load/store is supported.");
2706 const uint64_t mask = size_in_bytes - 1;
2707 if (adr.getMode() == Address::base_plus_offset &&
2708 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2709 code()->set_last_insn(pc());
2710 }
2711 return false;
2712 }
2713 }
2714
2715 void MacroAssembler::ldr(Register Rx, const Address &adr) {
2716 // We always try to merge two adjacent loads into one ldp.
2717 if (!try_merge_ldst(Rx, adr, 8, false)) {
2718 Assembler::ldr(Rx, adr);
2719 }
2720 }
2721
2722 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2723 // We always try to merge two adjacent loads into one ldp.
2724 if (!try_merge_ldst(Rw, adr, 4, false)) {
2725 Assembler::ldrw(Rw, adr);
2726 }
2727 }
2728
2729 void MacroAssembler::str(Register Rx, const Address &adr) {
2730 // We always try to merge two adjacent stores into one stp.
2731 if (!try_merge_ldst(Rx, adr, 8, true)) {
2732 Assembler::str(Rx, adr);
2733 }
2734 }
2735
2736 void MacroAssembler::strw(Register Rw, const Address &adr) {
2737 // We always try to merge two adjacent stores into one stp.
2738 if (!try_merge_ldst(Rw, adr, 4, true)) {
2739 Assembler::strw(Rw, adr);
2740 }
2741 }
2742
2743 // MacroAssembler routines found actually to be needed
2744
2745 void MacroAssembler::push(Register src)
2746 {
2747 str(src, Address(pre(esp, -1 * wordSize)));
2748 }
2749
2750 void MacroAssembler::pop(Register dst)
2751 {
2752 ldr(dst, Address(post(esp, 1 * wordSize)));
2753 }
2754
2755 // Note: load_unsigned_short used to be called load_unsigned_word.
2756 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2757 int off = offset();
2758 ldrh(dst, src);
2759 return off;
2760 }
2761
2762 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2763 int off = offset();
2764 ldrb(dst, src);
2765 return off;
2766 }
2767
2768 int MacroAssembler::load_signed_short(Register dst, Address src) {
2769 int off = offset();
2770 ldrsh(dst, src);
2771 return off;
2772 }
2773
2774 int MacroAssembler::load_signed_byte(Register dst, Address src) {
2775 int off = offset();
2776 ldrsb(dst, src);
2777 return off;
2778 }
2779
2780 int MacroAssembler::load_signed_short32(Register dst, Address src) {
2781 int off = offset();
2782 ldrshw(dst, src);
2783 return off;
2784 }
2785
2786 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
2787 int off = offset();
2788 ldrsbw(dst, src);
2789 return off;
2790 }
2791
2792 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2793 switch (size_in_bytes) {
2794 case 8: ldr(dst, src); break;
2795 case 4: ldrw(dst, src); break;
2796 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2797 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2798 default: ShouldNotReachHere();
2799 }
2800 }
2801
2802 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) {
2803 switch (size_in_bytes) {
2804 case 8: str(src, dst); break;
2805 case 4: strw(src, dst); break;
2806 case 2: strh(src, dst); break;
2807 case 1: strb(src, dst); break;
2808 default: ShouldNotReachHere();
2809 }
2810 }
2811
2812 void MacroAssembler::decrementw(Register reg, int value)
2813 {
2814 if (value < 0) { incrementw(reg, -value); return; }
2815 if (value == 0) { return; }
2816 if (value < (1 << 12)) { subw(reg, reg, value); return; }
2817 /* else */ {
2818 guarantee(reg != rscratch2, "invalid dst for register decrement");
2819 movw(rscratch2, (unsigned)value);
2820 subw(reg, reg, rscratch2);
2821 }
2822 }
2823
2824 void MacroAssembler::decrement(Register reg, int value)
2825 {
2826 if (value < 0) { increment(reg, -value); return; }
2827 if (value == 0) { return; }
2828 if (value < (1 << 12)) { sub(reg, reg, value); return; }
2829 /* else */ {
2830 assert(reg != rscratch2, "invalid dst for register decrement");
2831 mov(rscratch2, (uint64_t)value);
2832 sub(reg, reg, rscratch2);
2833 }
2834 }
2835
2836 void MacroAssembler::decrementw(Address dst, int value)
2837 {
2838 assert(!dst.uses(rscratch1), "invalid dst for address decrement");
2839 if (dst.getMode() == Address::literal) {
2840 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2841 lea(rscratch2, dst);
2842 dst = Address(rscratch2);
2843 }
2844 ldrw(rscratch1, dst);
2845 decrementw(rscratch1, value);
2846 strw(rscratch1, dst);
2847 }
2848
2849 void MacroAssembler::decrement(Address dst, int value)
2850 {
2851 assert(!dst.uses(rscratch1), "invalid address for decrement");
2852 if (dst.getMode() == Address::literal) {
2853 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2854 lea(rscratch2, dst);
2855 dst = Address(rscratch2);
2856 }
2857 ldr(rscratch1, dst);
2858 decrement(rscratch1, value);
2859 str(rscratch1, dst);
2860 }
2861
2862 void MacroAssembler::incrementw(Register reg, int value)
2863 {
2864 if (value < 0) { decrementw(reg, -value); return; }
2865 if (value == 0) { return; }
2866 if (value < (1 << 12)) { addw(reg, reg, value); return; }
2867 /* else */ {
2868 assert(reg != rscratch2, "invalid dst for register increment");
2869 movw(rscratch2, (unsigned)value);
2870 addw(reg, reg, rscratch2);
2871 }
2872 }
2873
2874 void MacroAssembler::increment(Register reg, int value)
2875 {
2876 if (value < 0) { decrement(reg, -value); return; }
2877 if (value == 0) { return; }
2878 if (value < (1 << 12)) { add(reg, reg, value); return; }
2879 /* else */ {
2880 assert(reg != rscratch2, "invalid dst for register increment");
2881 movw(rscratch2, (unsigned)value);
2882 add(reg, reg, rscratch2);
2883 }
2884 }
2885
2886 void MacroAssembler::incrementw(Address dst, int value)
2887 {
2888 assert(!dst.uses(rscratch1), "invalid dst for address increment");
2889 if (dst.getMode() == Address::literal) {
2890 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2891 lea(rscratch2, dst);
2892 dst = Address(rscratch2);
2893 }
2894 ldrw(rscratch1, dst);
2895 incrementw(rscratch1, value);
2896 strw(rscratch1, dst);
2897 }
2898
2899 void MacroAssembler::increment(Address dst, int value)
2900 {
2901 assert(!dst.uses(rscratch1), "invalid dst for address increment");
2902 if (dst.getMode() == Address::literal) {
2903 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2904 lea(rscratch2, dst);
2905 dst = Address(rscratch2);
2906 }
2907 ldr(rscratch1, dst);
2908 increment(rscratch1, value);
2909 str(rscratch1, dst);
2910 }
2911
2912 // Push lots of registers in the bit set supplied. Don't push sp.
2913 // Return the number of words pushed
2914 int MacroAssembler::push(unsigned int bitset, Register stack) {
2915 int words_pushed = 0;
2916
2917 // Scan bitset to accumulate register pairs
2918 unsigned char regs[32];
2919 int count = 0;
2920 for (int reg = 0; reg <= 30; reg++) {
2921 if (1 & bitset)
2922 regs[count++] = reg;
2923 bitset >>= 1;
2924 }
2925 regs[count++] = zr->raw_encoding();
2926 count &= ~1; // Only push an even number of regs
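  // If the bitset had an odd population count, the zr slot appended above
  // survives the rounding and pads the final register pair; otherwise it
  // is dropped.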
2927
2928 if (count) {
2929 stp(as_Register(regs[0]), as_Register(regs[1]),
2930 Address(pre(stack, -count * wordSize)));
2931 words_pushed += 2;
2932 }
2933 for (int i = 2; i < count; i += 2) {
2934 stp(as_Register(regs[i]), as_Register(regs[i+1]),
2935 Address(stack, i * wordSize));
2936 words_pushed += 2;
2937 }
2938
2939 assert(words_pushed == count, "oops, pushed != count");
2940
2941 return count;
2942 }
2943
2944 int MacroAssembler::pop(unsigned int bitset, Register stack) {
2945 int words_pushed = 0;
2946
2947 // Scan bitset to accumulate register pairs
2948 unsigned char regs[32];
2949 int count = 0;
2950 for (int reg = 0; reg <= 30; reg++) {
2951 if (1 & bitset)
2952 regs[count++] = reg;
2953 bitset >>= 1;
2954 }
2955 regs[count++] = zr->raw_encoding();
2956 count &= ~1;
2957
2958 for (int i = 2; i < count; i += 2) {
2959 ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2960 Address(stack, i * wordSize));
2961 words_pushed += 2;
2962 }
2963 if (count) {
2964 ldp(as_Register(regs[0]), as_Register(regs[1]),
2965 Address(post(stack, count * wordSize)));
2966 words_pushed += 2;
2967 }
2968
2969 assert(words_pushed == count, "oops, pushed != count");
2970
2971 return count;
2972 }
2973
2974 // Push lots of registers in the bit set supplied. Don't push sp.
2975 // Return the number of dwords pushed
2976 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
2977 int words_pushed = 0;
2978 bool use_sve = false;
2979 int sve_vector_size_in_bytes = 0;
2980
2981 #ifdef COMPILER2
2982 use_sve = Matcher::supports_scalable_vector();
2983 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2984 #endif
2985
2986 // Scan bitset to accumulate register pairs
2987 unsigned char regs[32];
2988 int count = 0;
2989 for (int reg = 0; reg <= 31; reg++) {
2990 if (1 & bitset)
2991 regs[count++] = reg;
2992 bitset >>= 1;
2993 }
2994
2995 if (count == 0) {
2996 return 0;
2997 }
2998
2999 if (mode == PushPopFull) {
3000 if (use_sve && sve_vector_size_in_bytes > 16) {
3001 mode = PushPopSVE;
3002 } else {
3003 mode = PushPopNeon;
3004 }
3005 }
3006
3007 #ifndef PRODUCT
3008 {
3009 char buffer[48];
3010 if (mode == PushPopSVE) {
3011 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
3012 } else if (mode == PushPopNeon) {
3013 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
3014 } else {
3015 os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
3016 }
3017 block_comment(buffer);
3018 }
3019 #endif
3020
3021 if (mode == PushPopSVE) {
3022 sub(stack, stack, sve_vector_size_in_bytes * count);
3023 for (int i = 0; i < count; i++) {
3024 sve_str(as_FloatRegister(regs[i]), Address(stack, i));
3025 }
3026 return count * sve_vector_size_in_bytes / 8;
3027 }
3028
3029 if (mode == PushPopNeon) {
3030 if (count == 1) {
3031 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
3032 return 2;
3033 }
3034
3035 bool odd = (count & 1) == 1;
3036 int push_slots = count + (odd ? 1 : 0);
3037
3038 // Always pushing full 128 bit registers.
3039 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
3040 words_pushed += 2;
3041
3042 for (int i = 2; i + 1 < count; i += 2) {
3043 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3044 words_pushed += 2;
3045 }
3046
3047 if (odd) {
3048 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3049 words_pushed++;
3050 }
3051
3052 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3053 return count * 2;
3054 }
3055
3056 if (mode == PushPopFp) {
3057 bool odd = (count & 1) == 1;
3058 int push_slots = count + (odd ? 1 : 0);
3059
3060 if (count == 1) {
      // Stack pointer must be 16-byte aligned
3062 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize)));
3063 return 1;
3064 }
3065
3066 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize)));
3067 words_pushed += 2;
3068
3069 for (int i = 2; i + 1 < count; i += 2) {
3070 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3071 words_pushed += 2;
3072 }
3073
3074 if (odd) {
      // Stack pointer must be 16-byte aligned
3076 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3077 words_pushed++;
3078 }
3079
3080 assert(words_pushed == count, "oops, pushed != count");
3081
3082 return count;
3083 }
3084
3085 return 0;
3086 }
3087
3088 // Return the number of dwords popped
3089 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
3090 int words_pushed = 0;
3091 bool use_sve = false;
3092 int sve_vector_size_in_bytes = 0;
3093
3094 #ifdef COMPILER2
3095 use_sve = Matcher::supports_scalable_vector();
3096 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
3097 #endif
3098 // Scan bitset to accumulate register pairs
3099 unsigned char regs[32];
3100 int count = 0;
3101 for (int reg = 0; reg <= 31; reg++) {
3102 if (1 & bitset)
3103 regs[count++] = reg;
3104 bitset >>= 1;
3105 }
3106
3107 if (count == 0) {
3108 return 0;
3109 }
3110
3111 if (mode == PushPopFull) {
3112 if (use_sve && sve_vector_size_in_bytes > 16) {
3113 mode = PushPopSVE;
3114 } else {
3115 mode = PushPopNeon;
3116 }
3117 }
3118
3119 #ifndef PRODUCT
3120 {
3121 char buffer[48];
3122 if (mode == PushPopSVE) {
3123 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
3124 } else if (mode == PushPopNeon) {
3125 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
3126 } else {
3127 os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
3128 }
3129 block_comment(buffer);
3130 }
3131 #endif
3132
3133 if (mode == PushPopSVE) {
3134 for (int i = count - 1; i >= 0; i--) {
3135 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
3136 }
3137 add(stack, stack, sve_vector_size_in_bytes * count);
3138 return count * sve_vector_size_in_bytes / 8;
3139 }
3140
3141 if (mode == PushPopNeon) {
3142 if (count == 1) {
3143 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
3144 return 2;
3145 }
3146
3147 bool odd = (count & 1) == 1;
3148 int push_slots = count + (odd ? 1 : 0);
3149
3150 if (odd) {
3151 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3152 words_pushed++;
3153 }
3154
3155 for (int i = 2; i + 1 < count; i += 2) {
3156 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3157 words_pushed += 2;
3158 }
3159
3160 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
3161 words_pushed += 2;
3162
3163 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3164
3165 return count * 2;
3166 }
3167
3168 if (mode == PushPopFp) {
3169 bool odd = (count & 1) == 1;
3170 int push_slots = count + (odd ? 1 : 0);
3171
3172 if (count == 1) {
3173 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize)));
3174 return 1;
3175 }
3176
3177 if (odd) {
3178 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3179 words_pushed++;
3180 }
3181
3182 for (int i = 2; i + 1 < count; i += 2) {
3183 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3184 words_pushed += 2;
3185 }
3186
3187 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize)));
3188 words_pushed += 2;
3189
3190 assert(words_pushed == count, "oops, pushed != count");
3191
3192 return count;
3193 }
3194
3195 return 0;
3196 }
3197
3198 // Return the number of dwords pushed
3199 int MacroAssembler::push_p(unsigned int bitset, Register stack) {
3200 bool use_sve = false;
3201 int sve_predicate_size_in_slots = 0;
3202
3203 #ifdef COMPILER2
3204 use_sve = Matcher::supports_scalable_vector();
3205 if (use_sve) {
3206 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3207 }
3208 #endif
3209
3210 if (!use_sve) {
3211 return 0;
3212 }
3213
3214 unsigned char regs[PRegister::number_of_registers];
3215 int count = 0;
3216 for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3217 if (1 & bitset)
3218 regs[count++] = reg;
3219 bitset >>= 1;
3220 }
3221
3222 if (count == 0) {
3223 return 0;
3224 }
3225
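  // The predicates are saved as one contiguous block; round the block size up
  // to 16 bytes so the stack pointer stays 16-byte aligned.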
3226 int total_push_bytes = align_up(sve_predicate_size_in_slots *
3227 VMRegImpl::stack_slot_size * count, 16);
3228 sub(stack, stack, total_push_bytes);
3229 for (int i = 0; i < count; i++) {
3230 sve_str(as_PRegister(regs[i]), Address(stack, i));
3231 }
3232 return total_push_bytes / 8;
3233 }
3234
3235 // Return the number of dwords popped
3236 int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
3237 bool use_sve = false;
3238 int sve_predicate_size_in_slots = 0;
3239
3240 #ifdef COMPILER2
3241 use_sve = Matcher::supports_scalable_vector();
3242 if (use_sve) {
3243 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3244 }
3245 #endif
3246
3247 if (!use_sve) {
3248 return 0;
3249 }
3250
3251 unsigned char regs[PRegister::number_of_registers];
3252 int count = 0;
3253 for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3254 if (1 & bitset)
3255 regs[count++] = reg;
3256 bitset >>= 1;
3257 }
3258
3259 if (count == 0) {
3260 return 0;
3261 }
3262
3263 int total_pop_bytes = align_up(sve_predicate_size_in_slots *
3264 VMRegImpl::stack_slot_size * count, 16);
3265 for (int i = count - 1; i >= 0; i--) {
3266 sve_ldr(as_PRegister(regs[i]), Address(stack, i));
3267 }
3268 add(stack, stack, total_pop_bytes);
3269 return total_pop_bytes / 8;
3270 }
3271
3272 #ifdef ASSERT
3273 void MacroAssembler::verify_heapbase(const char* msg) {
3274 #if 0
3275 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
3276 assert (Universe::heap() != nullptr, "java heap should be initialized");
3277 if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
3278 // rheapbase is allocated as general register
3279 return;
3280 }
3281 if (CheckCompressedOops) {
3282 Label ok;
3283 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
3284 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3285 br(Assembler::EQ, ok);
3286 stop(msg);
3287 bind(ok);
3288 pop(1 << rscratch1->encoding(), sp);
3289 }
3290 #endif
3291 }
3292 #endif
3293
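// JNI handles carry a type tag in their low bits (see JNIHandles::TypeTag):
// local handles are untagged, global handles have bit 1 set and weak globals
// have bit 0 set. The tag is stripped via a negative offset before the handle
// slot is dereferenced.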
3294 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3295 assert_different_registers(value, tmp1, tmp2);
3296 Label done, tagged, weak_tagged;
3297
3298 cbz(value, done); // Use null as-is.
3299 tst(value, JNIHandles::tag_mask); // Test for tag.
3300 br(Assembler::NE, tagged);
3301
3302 // Resolve local handle
3303 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
3304 verify_oop(value);
3305 b(done);
3306
3307 bind(tagged);
3308 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
3309 tbnz(value, 0, weak_tagged); // Test for weak tag.
3310
3311 // Resolve global handle
3312 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3313 verify_oop(value);
3314 b(done);
3315
3316 bind(weak_tagged);
3317 // Resolve jweak.
3318 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3319 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2);
3320 verify_oop(value);
3321
3322 bind(done);
3323 }
3324
3325 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) {
3326 assert_different_registers(value, tmp1, tmp2);
3327 Label done;
3328
3329 cbz(value, done); // Use null as-is.
3330
3331 #ifdef ASSERT
3332 {
3333 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3334 Label valid_global_tag;
3335 tbnz(value, 1, valid_global_tag); // Test for global tag
3336 stop("non global jobject using resolve_global_jobject");
3337 bind(valid_global_tag);
3338 }
3339 #endif
3340
3341 // Resolve global handle
3342 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3343 verify_oop(value);
3344
3345 bind(done);
3346 }
3347
3348 void MacroAssembler::stop(const char* msg) {
  // Skip AOT caching of C strings when emitting into the scratch buffer.
3350 const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
3351 BLOCK_COMMENT(str);
3352 // load msg into r0 so we can access it from the signal handler
3353 // ExternalAddress enables saving and restoring via the code cache
3354 lea(c_rarg0, ExternalAddress((address) str));
3355 dcps1(0xdeae);
3356 }
3357
3358 void MacroAssembler::unimplemented(const char* what) {
3359 const char* buf = nullptr;
3360 {
3361 ResourceMark rm;
3362 stringStream ss;
3363 ss.print("unimplemented: %s", what);
3364 buf = code_string(ss.as_string());
3365 }
3366 stop(buf);
3367 }
3368
3369 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3370 #ifdef ASSERT
3371 Label OK;
3372 br(cc, OK);
3373 stop(msg);
3374 bind(OK);
3375 #endif
3376 }
3377
3378 // If a constant does not fit in an immediate field, generate some
3379 // number of MOV instructions and then perform the operation.
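// A constant whose magnitude fits in 24 bits is split into a shifted 12-bit
// chunk plus a low 12-bit chunk (two immediate instructions); anything larger
// is materialized into Rd with mov and combined using the register form.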
3380 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
3381 add_sub_imm_insn insn1,
3382 add_sub_reg_insn insn2,
3383 bool is32) {
3384 assert(Rd != zr, "Rd = zr and not setting flags?");
3385 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3386 if (fits) {
3387 (this->*insn1)(Rd, Rn, imm);
3388 } else {
3389 if (g_uabs(imm) < (1 << 24)) {
3390 (this->*insn1)(Rd, Rn, imm & -(1 << 12));
3391 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
3392 } else {
3393 assert_different_registers(Rd, Rn);
3394 mov(Rd, imm);
3395 (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3396 }
3397 }
3398 }
3399
// Separate version which sets the flags. Optimisations are more restricted
3401 // because we must set the flags correctly.
3402 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
3403 add_sub_imm_insn insn1,
3404 add_sub_reg_insn insn2,
3405 bool is32) {
3406 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3407 if (fits) {
3408 (this->*insn1)(Rd, Rn, imm);
3409 } else {
3410 assert_different_registers(Rd, Rn);
3411 assert(Rd != zr, "overflow in immediate operand");
3412 mov(Rd, imm);
3413 (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3414 }
3415 }
3416
3417
3418 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
3419 if (increment.is_register()) {
3420 add(Rd, Rn, increment.as_register());
3421 } else {
3422 add(Rd, Rn, increment.as_constant());
3423 }
3424 }
3425
3426 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
3427 if (increment.is_register()) {
3428 addw(Rd, Rn, increment.as_register());
3429 } else {
3430 addw(Rd, Rn, increment.as_constant());
3431 }
3432 }
3433
3434 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3435 if (decrement.is_register()) {
3436 sub(Rd, Rn, decrement.as_register());
3437 } else {
3438 sub(Rd, Rn, decrement.as_constant());
3439 }
3440 }
3441
3442 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3443 if (decrement.is_register()) {
3444 subw(Rd, Rn, decrement.as_register());
3445 } else {
3446 subw(Rd, Rn, decrement.as_constant());
3447 }
3448 }
3449
3450 void MacroAssembler::reinit_heapbase()
3451 {
3452 if (UseCompressedOops) {
3453 if (Universe::is_fully_initialized()) {
3454 mov(rheapbase, CompressedOops::base());
3455 } else {
3456 lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3457 ldr(rheapbase, Address(rheapbase));
3458 }
3459 }
3460 }
3461
// This simulates the behaviour of the x86 cmpxchg instruction using a
// load-linked/store-conditional pair. We use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// N.B. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. We supply a register for the old value explicitly.

// The AArch64 load-linked/store-conditional instructions do not
// accept an offset, so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// register+offset Address.
3475
3476 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
3477 Label &succeed, Label *fail) {
3478 // oldv holds comparison value
3479 // newv holds value to write in exchange
3480 // addr identifies memory word to compare against/update
3481 if (UseLSE) {
3482 mov(tmp, oldv);
3483 casal(Assembler::xword, oldv, newv, addr);
3484 cmp(tmp, oldv);
3485 br(Assembler::EQ, succeed);
3486 membar(AnyAny);
3487 } else {
3488 Label retry_load, nope;
3489 prfm(Address(addr), PSTL1STRM);
3490 bind(retry_load);
3491 // flush and load exclusive from the memory location
3492 // and fail if it is not what we expect
3493 ldaxr(tmp, addr);
3494 cmp(tmp, oldv);
3495 br(Assembler::NE, nope);
3496 // if we store+flush with no intervening write tmp will be zero
3497 stlxr(tmp, newv, addr);
3498 cbzw(tmp, succeed);
    // Retry so we only ever return after a load fails to compare;
    // this ensures we don't return a stale value after a failed write.
3501 b(retry_load);
3502 // if the memory word differs we return it in oldv and signal a fail
3503 bind(nope);
3504 membar(AnyAny);
3505 mov(oldv, tmp);
3506 }
3507 if (fail)
3508 b(*fail);
3509 }
3510
3511 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
3512 Label &succeed, Label *fail) {
3513 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
3514 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
3515 }
3516
3517 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
3518 Label &succeed, Label *fail) {
3519 // oldv holds comparison value
3520 // newv holds value to write in exchange
3521 // addr identifies memory word to compare against/update
3522 // tmp returns 0/1 for success/failure
3523 if (UseLSE) {
3524 mov(tmp, oldv);
3525 casal(Assembler::word, oldv, newv, addr);
3526 cmp(tmp, oldv);
3527 br(Assembler::EQ, succeed);
3528 membar(AnyAny);
3529 } else {
3530 Label retry_load, nope;
3531 prfm(Address(addr), PSTL1STRM);
3532 bind(retry_load);
3533 // flush and load exclusive from the memory location
3534 // and fail if it is not what we expect
3535 ldaxrw(tmp, addr);
3536 cmp(tmp, oldv);
3537 br(Assembler::NE, nope);
3538 // if we store+flush with no intervening write tmp will be zero
3539 stlxrw(tmp, newv, addr);
3540 cbzw(tmp, succeed);
    // Retry so we only ever return after a load fails to compare;
    // this ensures we don't return a stale value after a failed write.
3543 b(retry_load);
3544 // if the memory word differs we return it in oldv and signal a fail
3545 bind(nope);
3546 membar(AnyAny);
3547 mov(oldv, tmp);
3548 }
3549 if (fail)
3550 b(*fail);
3551 }
3552
// A generic CAS; success or failure is in the EQ flag. A weak CAS
// doesn't retry and may fail spuriously. If the oldval is wanted,
// pass a register for the result; otherwise pass noreg.
3556
3557 // Clobbers rscratch1
3558 void MacroAssembler::cmpxchg(Register addr, Register expected,
3559 Register new_val,
3560 enum operand_size size,
3561 bool acquire, bool release,
3562 bool weak,
3563 Register result) {
3564 if (result == noreg) result = rscratch1;
3565 BLOCK_COMMENT("cmpxchg {");
3566 if (UseLSE) {
3567 mov(result, expected);
3568 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3569 compare_eq(result, expected, size);
3570 #ifdef ASSERT
3571 // Poison rscratch1 which is written on !UseLSE branch
3572 mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3573 #endif
3574 } else {
3575 Label retry_load, done;
3576 prfm(Address(addr), PSTL1STRM);
3577 bind(retry_load);
3578 load_exclusive(result, addr, size, acquire);
3579 compare_eq(result, expected, size);
3580 br(Assembler::NE, done);
3581 store_exclusive(rscratch1, new_val, addr, size, release);
3582 if (weak) {
3583 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3584 } else {
3585 cbnzw(rscratch1, retry_load);
3586 }
3587 bind(done);
3588 }
3589 BLOCK_COMMENT("} cmpxchg");
3590 }
3591
3592 // A generic comparison. Only compares for equality, clobbers rscratch1.
3593 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
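  // There is no sub-word cmp, so for halfword/byte sizes we XOR the operands
  // and test only the low 16 or 8 bits; ands sets the flags so EQ means equal.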
3594 if (size == xword) {
3595 cmp(rm, rn);
3596 } else if (size == word) {
3597 cmpw(rm, rn);
3598 } else if (size == halfword) {
3599 eorw(rscratch1, rm, rn);
3600 ands(zr, rscratch1, 0xffff);
3601 } else if (size == byte) {
3602 eorw(rscratch1, rm, rn);
3603 ands(zr, rscratch1, 0xff);
3604 } else {
3605 ShouldNotReachHere();
3606 }
3607 }
3608
3609
3610 static bool different(Register a, RegisterOrConstant b, Register c) {
3611 if (b.is_constant())
3612 return a != c;
3613 else
3614 return a != b.as_register() && a != c && b.as_register() != c;
3615 }
3616
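// ATOMIC_OP generates an atomic fetch-and-add style helper. With LSE a single
// LDADD(AL) instruction performs the update and returns the previous value;
// otherwise an LDXR/STXR retry loop is used. When the natural result register
// would clash with incr or addr, IOP (the inverse of OP) recomputes the
// previous value from the freshly computed new value.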
3617 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
3618 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
3619 if (UseLSE) { \
3620 prev = prev->is_valid() ? prev : zr; \
3621 if (incr.is_register()) { \
3622 AOP(sz, incr.as_register(), prev, addr); \
3623 } else { \
3624 mov(rscratch2, incr.as_constant()); \
3625 AOP(sz, rscratch2, prev, addr); \
3626 } \
3627 return; \
3628 } \
3629 Register result = rscratch2; \
3630 if (prev->is_valid()) \
3631 result = different(prev, incr, addr) ? prev : rscratch2; \
3632 \
3633 Label retry_load; \
3634 prfm(Address(addr), PSTL1STRM); \
3635 bind(retry_load); \
3636 LDXR(result, addr); \
3637 OP(rscratch1, result, incr); \
3638 STXR(rscratch2, rscratch1, addr); \
3639 cbnzw(rscratch2, retry_load); \
3640 if (prev->is_valid() && prev != result) { \
3641 IOP(prev, rscratch1, incr); \
3642 } \
3643 }
3644
3645 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
3646 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
3647 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
3648 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
3649
3650 #undef ATOMIC_OP
3651
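// ATOMIC_XCHG generates atomic exchange helpers with the requested memory
// ordering (plain, release or acquire/release). With LSE this is a single
// SWP-family instruction; otherwise an LDXR/STXR retry loop.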
3652 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \
3653 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
3654 if (UseLSE) { \
3655 prev = prev->is_valid() ? prev : zr; \
3656 AOP(sz, newv, prev, addr); \
3657 return; \
3658 } \
3659 Register result = rscratch2; \
3660 if (prev->is_valid()) \
3661 result = different(prev, newv, addr) ? prev : rscratch2; \
3662 \
3663 Label retry_load; \
3664 prfm(Address(addr), PSTL1STRM); \
3665 bind(retry_load); \
3666 LDXR(result, addr); \
3667 STXR(rscratch1, newv, addr); \
3668 cbnzw(rscratch1, retry_load); \
3669 if (prev->is_valid() && prev != result) \
3670 mov(prev, result); \
3671 }
3672
3673 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3674 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3675 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3676 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3677 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3678 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3679
3680 #undef ATOMIC_XCHG
3681
3682 #ifndef PRODUCT
3683 extern "C" void findpc(intptr_t x);
3684 #endif
3685
3686 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3687 {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
3690 JavaThread* thread = JavaThread::current();
3691 JavaThreadState saved_state = thread->thread_state();
3692 thread->set_thread_state(_thread_in_vm);
3693 #ifndef PRODUCT
3694 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3695 ttyLocker ttyl;
3696 BytecodeCounter::print();
3697 }
3698 #endif
3699 if (os::message_box(msg, "Execution stopped, print registers?")) {
3700 ttyLocker ttyl;
3701 tty->print_cr(" pc = 0x%016" PRIx64, pc);
3702 #ifndef PRODUCT
3703 tty->cr();
3704 findpc(pc);
3705 tty->cr();
3706 #endif
3707 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3708 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3709 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3710 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3711 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3712 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3713 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3714 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3715 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3716 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3717 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3718 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3719 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3720 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3721 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3722 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3723 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3724 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3725 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3726 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3727 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3728 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3729 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3730 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3731 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3732 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3733 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3734 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3735 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3736 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3737 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3738 BREAKPOINT;
3739 }
3740 }
3741 fatal("DEBUG MESSAGE: %s", msg);
3742 }
3743
3744 RegSet MacroAssembler::call_clobbered_gp_registers() {
3745 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3746 #ifndef R18_RESERVED
3747 regs += r18_tls;
3748 #endif
3749 return regs;
3750 }
3751
3752 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3753 int step = 4 * wordSize;
3754 push(call_clobbered_gp_registers() - exclude, sp);
3755 sub(sp, sp, step);
3756 mov(rscratch1, -step);
3757 // Push v0-v7, v16-v31.
3758 for (int i = 31; i>= 4; i -= 4) {
3759 if (i <= v7->encoding() || i >= v16->encoding())
3760 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
3761 as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
3762 }
3763 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
3764 as_FloatRegister(3), T1D, Address(sp));
3765 }
3766
3767 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
3768 for (int i = 0; i < 32; i += 4) {
3769 if (i <= v7->encoding() || i >= v16->encoding())
3770 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3771 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
3772 }
3773
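  // SVE code relies on ptrue being set up in a predicate register; the callee
  // may have clobbered it, so re-establish it here (see also pop_CPU_state).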
3774 reinitialize_ptrue();
3775
3776 pop(call_clobbered_gp_registers() - exclude, sp);
3777 }
3778
3779 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
3780 int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3781 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
3782 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3783 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3784 for (int i = 0; i < FloatRegister::number_of_registers; i++) {
3785 sve_str(as_FloatRegister(i), Address(sp, i));
3786 }
3787 } else {
3788 int step = (save_vectors ? 8 : 4) * wordSize;
3789 mov(rscratch1, -step);
3790 sub(sp, sp, step);
3791 for (int i = 28; i >= 4; i -= 4) {
3792 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3793 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
3794 }
3795 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
3796 }
3797 if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
3798 sub(sp, sp, total_predicate_in_bytes);
3799 for (int i = 0; i < PRegister::number_of_registers; i++) {
3800 sve_str(as_PRegister(i), Address(sp, i));
3801 }
3802 }
3803 }
3804
3805 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
3806 int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3807 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
3808 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
3809 sve_ldr(as_PRegister(i), Address(sp, i));
3810 }
3811 add(sp, sp, total_predicate_in_bytes);
3812 }
3813 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3814 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
3815 sve_ldr(as_FloatRegister(i), Address(sp, i));
3816 }
3817 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3818 } else {
3819 int step = (restore_vectors ? 8 : 4) * wordSize;
3820 for (int i = 0; i <= 28; i += 4)
3821 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3822 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
3823 }
3824
  // We may use predicate registers and rely on ptrue with SVE,
  // regardless of whether wide vectors (> 8 bytes) are used or not.
3827 if (use_sve) {
3828 reinitialize_ptrue();
3829 }
3830
3831 // integer registers except lr & sp
3832 pop(RegSet::range(r0, r17), sp);
3833 #ifdef R18_RESERVED
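  // r18 is reserved as a platform register on this ABI: discard its saved slot
  // into zr and restore only r19 from the pair.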
3834 ldp(zr, r19, Address(post(sp, 2 * wordSize)));
3835 pop(RegSet::range(r20, r29), sp);
3836 #else
3837 pop(RegSet::range(r18_tls, r29), sp);
3838 #endif
3839 }
3840
3841 /**
3842 * Helpers for multiply_to_len().
3843 */
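// Computes final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2, propagating
// both carries into the high word.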
3844 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3845 Register src1, Register src2) {
3846 adds(dest_lo, dest_lo, src1);
3847 adc(dest_hi, dest_hi, zr);
3848 adds(dest_lo, dest_lo, src2);
3849 adc(final_dest_hi, dest_hi, zr);
3850 }
3851
3852 // Generate an address from (r + r1 extend offset). "size" is the
3853 // size of the operand. The result may be in rscratch2.
3854 Address MacroAssembler::offsetted_address(Register r, Register r1,
3855 Address::extend ext, int offset, int size) {
3856 if (offset || (ext.shift() % size != 0)) {
3857 lea(rscratch2, Address(r, r1, ext));
3858 return Address(rscratch2, offset);
3859 } else {
3860 return Address(r, r1, ext);
3861 }
3862 }
3863
3864 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3865 {
3866 assert(offset >= 0, "spill to negative address?");
3867 // Offset reachable ?
3868 // Not aligned - 9 bits signed offset
3869 // Aligned - 12 bits unsigned offset shifted
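  // When the offset cannot be encoded directly, materialize the part that does
  // not fit into tmp and return an address whose remaining offset is encodable.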
3870 Register base = sp;
3871 if ((offset & (size-1)) && offset >= (1<<8)) {
3872 add(tmp, base, offset & ((1<<12)-1));
3873 base = tmp;
3874 offset &= -1u<<12;
3875 }
3876
3877 if (offset >= (1<<12) * size) {
3878 add(tmp, base, offset & (((1<<12)-1)<<12));
3879 base = tmp;
3880 offset &= ~(((1<<12)-1)<<12);
3881 }
3882
3883 return Address(base, offset);
3884 }
3885
3886 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3887 assert(offset >= 0, "spill to negative address?");
3888
3889 Register base = sp;
3890
3891 // An immediate offset in the range 0 to 255 which is multiplied
3892 // by the current vector or predicate register size in bytes.
3893 if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3894 return Address(base, offset / sve_reg_size_in_bytes);
3895 }
3896
3897 add(tmp, base, offset);
3898 return Address(tmp);
3899 }
3900
3901 // Checks whether offset is aligned.
3902 // Returns true if it is, else false.
3903 bool MacroAssembler::merge_alignment_check(Register base,
3904 size_t size,
3905 int64_t cur_offset,
3906 int64_t prev_offset) const {
3907 if (AvoidUnalignedAccesses) {
3908 if (base == sp) {
      // Check whether the lower offset is aligned to a register pair.
3910 int64_t pair_mask = size * 2 - 1;
3911 int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3912 return (offset & pair_mask) == 0;
3913 } else { // If base is not sp, we can't guarantee the access is aligned.
3914 return false;
3915 }
3916 } else {
3917 int64_t mask = size - 1;
3918 // Load/store pair instruction only supports element size aligned offset.
3919 return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3920 }
3921 }
3922
3923 // Checks whether current and previous loads/stores can be merged.
3924 // Returns true if it can be merged, else false.
3925 bool MacroAssembler::ldst_can_merge(Register rt,
3926 const Address &adr,
3927 size_t cur_size_in_bytes,
3928 bool is_store) const {
3929 address prev = pc() - NativeInstruction::instruction_size;
3930 address last = code()->last_insn();
3931
3932 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3933 return false;
3934 }
3935
3936 if (adr.getMode() != Address::base_plus_offset || prev != last) {
3937 return false;
3938 }
3939
3940 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3941 size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3942
3943 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3944 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3945
3946 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3947 return false;
3948 }
3949
3950 int64_t max_offset = 63 * prev_size_in_bytes;
3951 int64_t min_offset = -64 * prev_size_in_bytes;
3952
  assert(prev_ldst->is_not_pre_post_index(), "merging of pre-index or post-index accesses is not supported.");
3954
3955 // Only same base can be merged.
3956 if (adr.base() != prev_ldst->base()) {
3957 return false;
3958 }
3959
3960 int64_t cur_offset = adr.offset();
3961 int64_t prev_offset = prev_ldst->offset();
3962 size_t diff = abs(cur_offset - prev_offset);
3963 if (diff != prev_size_in_bytes) {
3964 return false;
3965 }
3966
  // The following cases cannot be merged:
  // ldr x2, [x2, #8]
  // ldr x3, [x2, #16]
  // or:
  // ldr x2, [x3, #8]
  // ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3974 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3975 return false;
3976 }
3977
3978 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3979 // Offset range must be in ldp/stp instruction's range.
3980 if (low_offset > max_offset || low_offset < min_offset) {
3981 return false;
3982 }
3983
3984 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3985 return true;
3986 }
3987
3988 return false;
3989 }
3990
3991 // Merge current load/store with previous load/store into ldp/stp.
3992 void MacroAssembler::merge_ldst(Register rt,
3993 const Address &adr,
3994 size_t cur_size_in_bytes,
3995 bool is_store) {
3996
3997 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3998
3999 Register rt_low, rt_high;
4000 address prev = pc() - NativeInstruction::instruction_size;
4001 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
4002
4003 int64_t offset;
4004
4005 if (adr.offset() < prev_ldst->offset()) {
4006 offset = adr.offset();
4007 rt_low = rt;
4008 rt_high = prev_ldst->target();
4009 } else {
4010 offset = prev_ldst->offset();
4011 rt_low = prev_ldst->target();
4012 rt_high = rt;
4013 }
4014
4015 Address adr_p = Address(prev_ldst->base(), offset);
4016 // Overwrite previous generated binary.
4017 code_section()->set_end(prev);
4018
4019 const size_t sz = prev_ldst->size_in_bytes();
4020 assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
4021 if (!is_store) {
4022 BLOCK_COMMENT("merged ldr pair");
4023 if (sz == 8) {
4024 ldp(rt_low, rt_high, adr_p);
4025 } else {
4026 ldpw(rt_low, rt_high, adr_p);
4027 }
4028 } else {
4029 BLOCK_COMMENT("merged str pair");
4030 if (sz == 8) {
4031 stp(rt_low, rt_high, adr_p);
4032 } else {
4033 stpw(rt_low, rt_high, adr_p);
4034 }
4035 }
4036 }
4037
4038 /**
4039 * Multiply 64 bit by 64 bit first loop.
4040 */
4041 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
4042 Register y, Register y_idx, Register z,
4043 Register carry, Register product,
4044 Register idx, Register kdx) {
4045 //
4046 // jlong carry, x[], y[], z[];
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4048 // huge_128 product = y[idx] * x[xstart] + carry;
4049 // z[kdx] = (jlong)product;
4050 // carry = (jlong)(product >>> 64);
4051 // }
4052 // z[xstart] = carry;
4053 //
4054
4055 Label L_first_loop, L_first_loop_exit;
4056 Label L_one_x, L_one_y, L_multiply;
4057
4058 subsw(xstart, xstart, 1);
4059 br(Assembler::MI, L_one_x);
4060
4061 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
4062 ldr(x_xstart, Address(rscratch1));
4063 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
4064
4065 bind(L_first_loop);
4066 subsw(idx, idx, 1);
4067 br(Assembler::MI, L_first_loop_exit);
4068 subsw(idx, idx, 1);
4069 br(Assembler::MI, L_one_y);
4070 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4071 ldr(y_idx, Address(rscratch1));
4072 ror(y_idx, y_idx, 32); // convert big-endian to little-endian
4073 bind(L_multiply);
4074
4075 // AArch64 has a multiply-accumulate instruction that we can't use
4076 // here because it has no way to process carries, so we have to use
4077 // separate add and adc instructions. Bah.
4078 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
4079 mul(product, x_xstart, y_idx);
4080 adds(product, product, carry);
4081 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
4082
4083 subw(kdx, kdx, 2);
4084 ror(product, product, 32); // back to big-endian
4085 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
4086
4087 b(L_first_loop);
4088
4089 bind(L_one_y);
4090 ldrw(y_idx, Address(y, 0));
4091 b(L_multiply);
4092
4093 bind(L_one_x);
4094 ldrw(x_xstart, Address(x, 0));
4095 b(L_first_loop);
4096
4097 bind(L_first_loop_exit);
4098 }
4099
4100 /**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
4102 *
4103 */
4104 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
4105 Register carry, Register carry2,
4106 Register idx, Register jdx,
4107 Register yz_idx1, Register yz_idx2,
4108 Register tmp, Register tmp3, Register tmp4,
4109 Register tmp6, Register product_hi) {
4110
4111 // jlong carry, x[], y[], z[];
4112 // int kdx = ystart+1;
4113 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
4114 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
4115 // jlong carry2 = (jlong)(tmp3 >>> 64);
4116 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2;
4117 // carry = (jlong)(tmp4 >>> 64);
4118 // z[kdx+idx+1] = (jlong)tmp3;
4119 // z[kdx+idx] = (jlong)tmp4;
4120 // }
4121 // idx += 2;
4122 // if (idx > 0) {
4123 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
4124 // z[kdx+idx] = (jlong)yz_idx1;
4125 // carry = (jlong)(yz_idx1 >>> 64);
4126 // }
4127 //
4128
4129 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
4130
4131 lsrw(jdx, idx, 2);
4132
4133 bind(L_third_loop);
4134
4135 subsw(jdx, jdx, 1);
4136 br(Assembler::MI, L_third_loop_exit);
4137 subw(idx, idx, 4);
4138
4139 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4140
4141 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
4142
4143 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4144
4145 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
4146 ror(yz_idx2, yz_idx2, 32);
4147
4148 ldp(rscratch2, rscratch1, Address(tmp6, 0));
4149
4150 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
4151 umulh(tmp4, product_hi, yz_idx1);
4152
4153 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
4154 ror(rscratch2, rscratch2, 32);
4155
4156 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp
4157 umulh(carry2, product_hi, yz_idx2);
4158
4159 // propagate sum of both multiplications into carry:tmp4:tmp3
4160 adds(tmp3, tmp3, carry);
4161 adc(tmp4, tmp4, zr);
4162 adds(tmp3, tmp3, rscratch1);
4163 adcs(tmp4, tmp4, tmp);
4164 adc(carry, carry2, zr);
4165 adds(tmp4, tmp4, rscratch2);
4166 adc(carry, carry, zr);
4167
4168 ror(tmp3, tmp3, 32); // convert little-endian to big-endian
4169 ror(tmp4, tmp4, 32);
4170 stp(tmp4, tmp3, Address(tmp6, 0));
4171
4172 b(L_third_loop);
4173 bind (L_third_loop_exit);
4174
4175 andw (idx, idx, 0x3);
4176 cbz(idx, L_post_third_loop_done);
4177
4178 Label L_check_1;
4179 subsw(idx, idx, 2);
4180 br(Assembler::MI, L_check_1);
4181
4182 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4183 ldr(yz_idx1, Address(rscratch1, 0));
4184 ror(yz_idx1, yz_idx1, 32);
4185 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
4186 umulh(tmp4, product_hi, yz_idx1);
4187 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4188 ldr(yz_idx2, Address(rscratch1, 0));
4189 ror(yz_idx2, yz_idx2, 32);
4190
4191 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
4192
4193 ror(tmp3, tmp3, 32);
4194 str(tmp3, Address(rscratch1, 0));
4195
4196 bind (L_check_1);
4197
4198 andw (idx, idx, 0x1);
4199 subsw(idx, idx, 1);
4200 br(Assembler::MI, L_post_third_loop_done);
4201 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4202 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3
4203 umulh(carry2, tmp4, product_hi);
4204 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4205
4206 add2_with_carry(carry2, tmp3, tmp4, carry);
4207
4208 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4209 extr(carry, carry2, tmp3, 32);
4210
4211 bind(L_post_third_loop_done);
4212 }
4213
4214 /**
4215 * Code for BigInteger::multiplyToLen() intrinsic.
4216 *
4217 * r0: x
4218 * r1: xlen
4219 * r2: y
4220 * r3: ylen
4221 * r4: z
4222 * r5: tmp0
4223 * r10: tmp1
4224 * r11: tmp2
4225 * r12: tmp3
4226 * r13: tmp4
4227 * r14: tmp5
4228 * r15: tmp6
4229 * r16: tmp7
4230 *
4231 */
4232 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
4233 Register z, Register tmp0,
4234 Register tmp1, Register tmp2, Register tmp3, Register tmp4,
4235 Register tmp5, Register tmp6, Register product_hi) {
4236
4237 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);
4238
4239 const Register idx = tmp1;
4240 const Register kdx = tmp2;
4241 const Register xstart = tmp3;
4242
4243 const Register y_idx = tmp4;
4244 const Register carry = tmp5;
4245 const Register product = xlen;
4246 const Register x_xstart = tmp0;
4247
4248 // First Loop.
4249 //
4250 // final static long LONG_MASK = 0xffffffffL;
4251 // int xstart = xlen - 1;
4252 // int ystart = ylen - 1;
4253 // long carry = 0;
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4255 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
4256 // z[kdx] = (int)product;
4257 // carry = product >>> 32;
4258 // }
4259 // z[xstart] = (int)carry;
4260 //
4261
4262 movw(idx, ylen); // idx = ylen;
4263 addw(kdx, xlen, ylen); // kdx = xlen+ylen;
4264 mov(carry, zr); // carry = 0;
4265
4266 Label L_done;
4267
4268 movw(xstart, xlen);
4269 subsw(xstart, xstart, 1);
4270 br(Assembler::MI, L_done);
4271
4272 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
4273
4274 Label L_second_loop;
4275 cbzw(kdx, L_second_loop);
4276
4277 Label L_carry;
4278 subw(kdx, kdx, 1);
4279 cbzw(kdx, L_carry);
4280
4281 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4282 lsr(carry, carry, 32);
4283 subw(kdx, kdx, 1);
4284
4285 bind(L_carry);
4286 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4287
4288 // Second and third (nested) loops.
4289 //
4290 // for (int i = xstart-1; i >= 0; i--) { // Second loop
4291 // carry = 0;
4292 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
4293 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
4294 // (z[k] & LONG_MASK) + carry;
4295 // z[k] = (int)product;
4296 // carry = product >>> 32;
4297 // }
4298 // z[i] = (int)carry;
4299 // }
4300 //
4301 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
4302
4303 const Register jdx = tmp1;
4304
4305 bind(L_second_loop);
4306 mov(carry, zr); // carry = 0;
4307 movw(jdx, ylen); // j = ystart+1
4308
4309 subsw(xstart, xstart, 1); // i = xstart-1;
4310 br(Assembler::MI, L_done);
4311
4312 str(z, Address(pre(sp, -4 * wordSize)));
4313
4314 Label L_last_x;
4315 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
4316 subsw(xstart, xstart, 1); // i = xstart-1;
4317 br(Assembler::MI, L_last_x);
4318
4319 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
4320 ldr(product_hi, Address(rscratch1));
4321 ror(product_hi, product_hi, 32); // convert big-endian to little-endian
4322
4323 Label L_third_loop_prologue;
4324 bind(L_third_loop_prologue);
4325
4326 str(ylen, Address(sp, wordSize));
4327 stp(x, xstart, Address(sp, 2 * wordSize));
4328 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
4329 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
4330 ldp(z, ylen, Address(post(sp, 2 * wordSize)));
4331 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen
4332
4333 addw(tmp3, xlen, 1);
4334 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4335 subsw(tmp3, tmp3, 1);
4336 br(Assembler::MI, L_done);
4337
4338 lsr(carry, carry, 32);
4339 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4340 b(L_second_loop);
4341
  // Infrequently executed code is moved outside the loops.
4343 bind(L_last_x);
4344 ldrw(product_hi, Address(x, 0));
4345 b(L_third_loop_prologue);
4346
4347 bind(L_done);
4348 }
4349
4350 // Code for BigInteger::mulAdd intrinsic
4351 // out = r0
4352 // in = r1
4353 // offset = r2 (already out.length-offset)
4354 // len = r3
4355 // k = r4
4356 //
4357 // pseudo code from java implementation:
4358 // carry = 0;
4359 // offset = out.length-offset - 1;
4360 // for (int j=len-1; j >= 0; j--) {
4361 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
4362 // out[offset--] = (int)product;
4363 // carry = product >>> 32;
4364 // }
4365 // return (int)carry;
4366 void MacroAssembler::mul_add(Register out, Register in, Register offset,
4367 Register len, Register k) {
4368 Label LOOP, END;
4369 // pre-loop
4370 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches
4371 csel(out, zr, out, Assembler::EQ);
4372 br(Assembler::EQ, END);
4373 add(in, in, len, LSL, 2); // in[j+1] address
4374 add(offset, out, offset, LSL, 2); // out[offset + 1] address
4375 mov(out, zr); // used to keep carry now
4376 BIND(LOOP);
4377 ldrw(rscratch1, Address(pre(in, -4)));
4378 madd(rscratch1, rscratch1, k, out);
4379 ldrw(rscratch2, Address(pre(offset, -4)));
4380 add(rscratch1, rscratch1, rscratch2);
4381 strw(rscratch1, Address(offset));
4382 lsr(out, rscratch1, 32);
4383 subs(len, len, 1);
4384 br(Assembler::NE, LOOP);
4385 BIND(END);
4386 }
4387
4388 /**
4389 * Emits code to update CRC-32 with a byte value according to constants in table
4390 *
4391 * @param [in,out]crc Register containing the crc.
4392 * @param [in]val Register containing the byte to fold into the CRC.
4393 * @param [in]table Register containing the table of crc constants.
4394 *
4395 * uint32_t crc;
4396 * val = crc_table[(val ^ crc) & 0xFF];
4397 * crc = val ^ (crc >> 8);
4398 *
4399 */
4400 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4401 eor(val, val, crc);
4402 andr(val, val, 0xff);
4403 ldrw(val, Address(table, val, Address::lsl(2)));
4404 eor(crc, val, crc, Assembler::LSR, 8);
4405 }
4406
4407 /**
4408 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
4409 *
4410 * @param [in,out]crc Register containing the crc.
4411 * @param [in]v Register containing the 32-bit to fold into the CRC.
4412 * @param [in]table0 Register containing table 0 of crc constants.
4413 * @param [in]table1 Register containing table 1 of crc constants.
4414 * @param [in]table2 Register containing table 2 of crc constants.
4415 * @param [in]table3 Register containing table 3 of crc constants.
4416 *
4417 * uint32_t crc;
4418 * v = crc ^ v
4419 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
4420 *
4421 */
4422 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
4423 Register table0, Register table1, Register table2, Register table3,
4424 bool upper) {
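  // When 'upper' is set, the high 32 bits of the 64-bit value in v are folded
  // with crc, otherwise the low 32 bits; the result is then split into four
  // byte-indexed table lookups (slicing-by-four).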
4425 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
4426 uxtb(tmp, v);
4427 ldrw(crc, Address(table3, tmp, Address::lsl(2)));
4428 ubfx(tmp, v, 8, 8);
4429 ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
4430 eor(crc, crc, tmp);
4431 ubfx(tmp, v, 16, 8);
4432 ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
4433 eor(crc, crc, tmp);
4434 ubfx(tmp, v, 24, 8);
4435 ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
4436 eor(crc, crc, tmp);
4437 }
4438
4439 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
4440 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4441 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4442 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4443
4444 subs(tmp0, len, 384);
4445 mvnw(crc, crc);
4446 br(Assembler::GE, CRC_by128_pre);
4447 BIND(CRC_less128);
4448 subs(len, len, 32);
4449 br(Assembler::GE, CRC_by32_loop);
4450 BIND(CRC_less32);
4451 adds(len, len, 32 - 4);
4452 br(Assembler::GE, CRC_by4_loop);
4453 adds(len, len, 4);
4454 br(Assembler::GT, CRC_by1_loop);
4455 b(L_exit);
4456
4457 BIND(CRC_by32_loop);
4458 ldp(tmp0, tmp1, Address(buf));
4459 crc32x(crc, crc, tmp0);
4460 ldp(tmp2, tmp3, Address(buf, 16));
4461 crc32x(crc, crc, tmp1);
4462 add(buf, buf, 32);
4463 crc32x(crc, crc, tmp2);
4464 subs(len, len, 32);
4465 crc32x(crc, crc, tmp3);
4466 br(Assembler::GE, CRC_by32_loop);
4467 cmn(len, (u1)32);
4468 br(Assembler::NE, CRC_less32);
4469 b(L_exit);
4470
4471 BIND(CRC_by4_loop);
4472 ldrw(tmp0, Address(post(buf, 4)));
4473 subs(len, len, 4);
4474 crc32w(crc, crc, tmp0);
4475 br(Assembler::GE, CRC_by4_loop);
4476 adds(len, len, 4);
4477 br(Assembler::LE, L_exit);
4478 BIND(CRC_by1_loop);
4479 ldrb(tmp0, Address(post(buf, 1)));
4480 subs(len, len, 1);
4481 crc32b(crc, crc, tmp0);
4482 br(Assembler::GT, CRC_by1_loop);
4483 b(L_exit);
4484
4485 BIND(CRC_by128_pre);
4486 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4487 4*256*sizeof(juint) + 8*sizeof(juint));
4488 mov(crc, 0);
4489 crc32x(crc, crc, tmp0);
4490 crc32x(crc, crc, tmp1);
4491
4492 cbnz(len, CRC_less128);
4493
4494 BIND(L_exit);
4495 mvnw(crc, crc);
4496 }
4497
4498 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
4499 Register len, Register tmp0, Register tmp1, Register tmp2,
4500 Register tmp3) {
4501 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4502 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4503
4504 mvnw(crc, crc);
4505
4506 subs(len, len, 128);
4507 br(Assembler::GE, CRC_by64_pre);
4508 BIND(CRC_less64);
4509 adds(len, len, 128-32);
4510 br(Assembler::GE, CRC_by32_loop);
4511 BIND(CRC_less32);
4512 adds(len, len, 32-4);
4513 br(Assembler::GE, CRC_by4_loop);
4514 adds(len, len, 4);
4515 br(Assembler::GT, CRC_by1_loop);
4516 b(L_exit);
4517
4518 BIND(CRC_by32_loop);
4519 ldp(tmp0, tmp1, Address(post(buf, 16)));
4520 subs(len, len, 32);
4521 crc32x(crc, crc, tmp0);
4522 ldr(tmp2, Address(post(buf, 8)));
4523 crc32x(crc, crc, tmp1);
4524 ldr(tmp3, Address(post(buf, 8)));
4525 crc32x(crc, crc, tmp2);
4526 crc32x(crc, crc, tmp3);
4527 br(Assembler::GE, CRC_by32_loop);
4528 cmn(len, (u1)32);
4529 br(Assembler::NE, CRC_less32);
4530 b(L_exit);
4531
4532 BIND(CRC_by4_loop);
4533 ldrw(tmp0, Address(post(buf, 4)));
4534 subs(len, len, 4);
4535 crc32w(crc, crc, tmp0);
4536 br(Assembler::GE, CRC_by4_loop);
4537 adds(len, len, 4);
4538 br(Assembler::LE, L_exit);
4539 BIND(CRC_by1_loop);
4540 ldrb(tmp0, Address(post(buf, 1)));
4541 subs(len, len, 1);
4542 crc32b(crc, crc, tmp0);
4543 br(Assembler::GT, CRC_by1_loop);
4544 b(L_exit);
4545
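  // The by-64 path is software pipelined: buf is biased by -8 so each loop
  // iteration can interleave eight crc32x instructions with the loads for the
  // next iteration; the last two words are folded in after the loop.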
4546 BIND(CRC_by64_pre);
4547 sub(buf, buf, 8);
4548 ldp(tmp0, tmp1, Address(buf, 8));
4549 crc32x(crc, crc, tmp0);
4550 ldr(tmp2, Address(buf, 24));
4551 crc32x(crc, crc, tmp1);
4552 ldr(tmp3, Address(buf, 32));
4553 crc32x(crc, crc, tmp2);
4554 ldr(tmp0, Address(buf, 40));
4555 crc32x(crc, crc, tmp3);
4556 ldr(tmp1, Address(buf, 48));
4557 crc32x(crc, crc, tmp0);
4558 ldr(tmp2, Address(buf, 56));
4559 crc32x(crc, crc, tmp1);
4560 ldr(tmp3, Address(pre(buf, 64)));
4561
4562 b(CRC_by64_loop);
4563
4564 align(CodeEntryAlignment);
4565 BIND(CRC_by64_loop);
4566 subs(len, len, 64);
4567 crc32x(crc, crc, tmp2);
4568 ldr(tmp0, Address(buf, 8));
4569 crc32x(crc, crc, tmp3);
4570 ldr(tmp1, Address(buf, 16));
4571 crc32x(crc, crc, tmp0);
4572 ldr(tmp2, Address(buf, 24));
4573 crc32x(crc, crc, tmp1);
4574 ldr(tmp3, Address(buf, 32));
4575 crc32x(crc, crc, tmp2);
4576 ldr(tmp0, Address(buf, 40));
4577 crc32x(crc, crc, tmp3);
4578 ldr(tmp1, Address(buf, 48));
4579 crc32x(crc, crc, tmp0);
4580 ldr(tmp2, Address(buf, 56));
4581 crc32x(crc, crc, tmp1);
4582 ldr(tmp3, Address(pre(buf, 64)));
4583 br(Assembler::GE, CRC_by64_loop);
4584
4585 // post-loop
4586 crc32x(crc, crc, tmp2);
4587 crc32x(crc, crc, tmp3);
4588
4589 sub(len, len, 64);
4590 add(buf, buf, 8);
4591 cmn(len, (u1)128);
4592 br(Assembler::NE, CRC_less64);
4593 BIND(L_exit);
4594 mvnw(crc, crc);
4595 }
4596
4597 /**
4598 * @param crc register containing existing CRC (32-bit)
4599 * @param buf register pointing to input byte buffer (byte*)
4600 * @param len register containing number of bytes
4601 * @param table register that will contain address of CRC table
4602 * @param tmp scratch register
4603 */
4604 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
4605 Register table0, Register table1, Register table2, Register table3,
4606 Register tmp, Register tmp2, Register tmp3) {
4607 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
4608
4609 if (UseCryptoPmullForCRC32) {
4610 kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4611 return;
4612 }
4613
4614 if (UseCRC32) {
4615 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
4616 return;
4617 }
4618
4619 mvnw(crc, crc);
4620
4621 {
4622 uint64_t offset;
4623 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4624 add(table0, table0, offset);
4625 }
4626 add(table1, table0, 1*256*sizeof(juint));
4627 add(table2, table0, 2*256*sizeof(juint));
4628 add(table3, table0, 3*256*sizeof(juint));
4629
4630 { // Neon code start
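    // Fold 32 bytes per iteration using carry-less multiplies (pmull/pmull2)
    // against folding constants stored just past the four CRC tables; the
    // final 32 bytes left in v0/v1 are reduced with update_word_crc32 below.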
4631 cmp(len, (u1)64);
4632 br(Assembler::LT, L_by16);
4633 eor(v16, T16B, v16, v16);
4634
4635 Label L_fold;
4636
4637 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
4638
4639 ld1(v0, v1, T2D, post(buf, 32));
4640 ld1r(v4, T2D, post(tmp, 8));
4641 ld1r(v5, T2D, post(tmp, 8));
4642 ld1r(v6, T2D, post(tmp, 8));
4643 ld1r(v7, T2D, post(tmp, 8));
4644 mov(v16, S, 0, crc);
4645
4646 eor(v0, T16B, v0, v16);
4647 sub(len, len, 64);
4648
4649 BIND(L_fold);
4650 pmull(v22, T8H, v0, v5, T8B);
4651 pmull(v20, T8H, v0, v7, T8B);
4652 pmull(v23, T8H, v0, v4, T8B);
4653 pmull(v21, T8H, v0, v6, T8B);
4654
4655 pmull2(v18, T8H, v0, v5, T16B);
4656 pmull2(v16, T8H, v0, v7, T16B);
4657 pmull2(v19, T8H, v0, v4, T16B);
4658 pmull2(v17, T8H, v0, v6, T16B);
4659
4660 uzp1(v24, T8H, v20, v22);
4661 uzp2(v25, T8H, v20, v22);
4662 eor(v20, T16B, v24, v25);
4663
4664 uzp1(v26, T8H, v16, v18);
4665 uzp2(v27, T8H, v16, v18);
4666 eor(v16, T16B, v26, v27);
4667
4668 ushll2(v22, T4S, v20, T8H, 8);
4669 ushll(v20, T4S, v20, T4H, 8);
4670
4671 ushll2(v18, T4S, v16, T8H, 8);
4672 ushll(v16, T4S, v16, T4H, 8);
4673
4674 eor(v22, T16B, v23, v22);
4675 eor(v18, T16B, v19, v18);
4676 eor(v20, T16B, v21, v20);
4677 eor(v16, T16B, v17, v16);
4678
4679 uzp1(v17, T2D, v16, v20);
4680 uzp2(v21, T2D, v16, v20);
4681 eor(v17, T16B, v17, v21);
4682
4683 ushll2(v20, T2D, v17, T4S, 16);
4684 ushll(v16, T2D, v17, T2S, 16);
4685
4686 eor(v20, T16B, v20, v22);
4687 eor(v16, T16B, v16, v18);
4688
4689 uzp1(v17, T2D, v20, v16);
4690 uzp2(v21, T2D, v20, v16);
4691 eor(v28, T16B, v17, v21);
4692
4693 pmull(v22, T8H, v1, v5, T8B);
4694 pmull(v20, T8H, v1, v7, T8B);
4695 pmull(v23, T8H, v1, v4, T8B);
4696 pmull(v21, T8H, v1, v6, T8B);
4697
4698 pmull2(v18, T8H, v1, v5, T16B);
4699 pmull2(v16, T8H, v1, v7, T16B);
4700 pmull2(v19, T8H, v1, v4, T16B);
4701 pmull2(v17, T8H, v1, v6, T16B);
4702
4703 ld1(v0, v1, T2D, post(buf, 32));
4704
4705 uzp1(v24, T8H, v20, v22);
4706 uzp2(v25, T8H, v20, v22);
4707 eor(v20, T16B, v24, v25);
4708
4709 uzp1(v26, T8H, v16, v18);
4710 uzp2(v27, T8H, v16, v18);
4711 eor(v16, T16B, v26, v27);
4712
4713 ushll2(v22, T4S, v20, T8H, 8);
4714 ushll(v20, T4S, v20, T4H, 8);
4715
4716 ushll2(v18, T4S, v16, T8H, 8);
4717 ushll(v16, T4S, v16, T4H, 8);
4718
4719 eor(v22, T16B, v23, v22);
4720 eor(v18, T16B, v19, v18);
4721 eor(v20, T16B, v21, v20);
4722 eor(v16, T16B, v17, v16);
4723
4724 uzp1(v17, T2D, v16, v20);
4725 uzp2(v21, T2D, v16, v20);
4726 eor(v16, T16B, v17, v21);
4727
4728 ushll2(v20, T2D, v16, T4S, 16);
4729 ushll(v16, T2D, v16, T2S, 16);
4730
4731 eor(v20, T16B, v22, v20);
4732 eor(v16, T16B, v16, v18);
4733
4734 uzp1(v17, T2D, v20, v16);
4735 uzp2(v21, T2D, v20, v16);
4736 eor(v20, T16B, v17, v21);
4737
4738 shl(v16, T2D, v28, 1);
4739 shl(v17, T2D, v20, 1);
4740
4741 eor(v0, T16B, v0, v16);
4742 eor(v1, T16B, v1, v17);
4743
4744 subs(len, len, 32);
4745 br(Assembler::GE, L_fold);
4746
4747 mov(crc, 0);
4748 mov(tmp, v0, D, 0);
4749 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4750 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4751 mov(tmp, v0, D, 1);
4752 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4753 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4754 mov(tmp, v1, D, 0);
4755 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4756 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4757 mov(tmp, v1, D, 1);
4758 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4759 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4760
4761 add(len, len, 32);
4762 } // Neon code end
4763
4764 BIND(L_by16);
4765 subs(len, len, 16);
4766 br(Assembler::GE, L_by16_loop);
4767 adds(len, len, 16-4);
4768 br(Assembler::GE, L_by4_loop);
4769 adds(len, len, 4);
4770 br(Assembler::GT, L_by1_loop);
4771 b(L_exit);
4772
4773 BIND(L_by4_loop);
4774 ldrw(tmp, Address(post(buf, 4)));
4775 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
4776 subs(len, len, 4);
4777 br(Assembler::GE, L_by4_loop);
4778 adds(len, len, 4);
4779 br(Assembler::LE, L_exit);
4780 BIND(L_by1_loop);
4781 subs(len, len, 1);
4782 ldrb(tmp, Address(post(buf, 1)));
4783 update_byte_crc32(crc, tmp, table0);
4784 br(Assembler::GT, L_by1_loop);
4785 b(L_exit);
4786
4787 align(CodeEntryAlignment);
4788 BIND(L_by16_loop);
4789 subs(len, len, 16);
4790 ldp(tmp, tmp3, Address(post(buf, 16)));
4791 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4792 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4793 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
4794 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
4795 br(Assembler::GE, L_by16_loop);
4796 adds(len, len, 16-4);
4797 br(Assembler::GE, L_by4_loop);
4798 adds(len, len, 4);
4799 br(Assembler::GT, L_by1_loop);
4800 BIND(L_exit);
4801 mvnw(crc, crc);
4802 }
4803
4804 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
4805 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4806 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4807 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4808
4809 subs(tmp0, len, 384);
4810 br(Assembler::GE, CRC_by128_pre);
4811 BIND(CRC_less128);
4812 subs(len, len, 32);
4813 br(Assembler::GE, CRC_by32_loop);
4814 BIND(CRC_less32);
4815 adds(len, len, 32 - 4);
4816 br(Assembler::GE, CRC_by4_loop);
4817 adds(len, len, 4);
4818 br(Assembler::GT, CRC_by1_loop);
4819 b(L_exit);
4820
4821 BIND(CRC_by32_loop);
4822 ldp(tmp0, tmp1, Address(buf));
4823 crc32cx(crc, crc, tmp0);
4824 ldr(tmp2, Address(buf, 16));
4825 crc32cx(crc, crc, tmp1);
4826 ldr(tmp3, Address(buf, 24));
4827 crc32cx(crc, crc, tmp2);
4828 add(buf, buf, 32);
4829 subs(len, len, 32);
4830 crc32cx(crc, crc, tmp3);
4831 br(Assembler::GE, CRC_by32_loop);
4832 cmn(len, (u1)32);
4833 br(Assembler::NE, CRC_less32);
4834 b(L_exit);
4835
4836 BIND(CRC_by4_loop);
4837 ldrw(tmp0, Address(post(buf, 4)));
4838 subs(len, len, 4);
4839 crc32cw(crc, crc, tmp0);
4840 br(Assembler::GE, CRC_by4_loop);
4841 adds(len, len, 4);
4842 br(Assembler::LE, L_exit);
4843 BIND(CRC_by1_loop);
4844 ldrb(tmp0, Address(post(buf, 1)));
4845 subs(len, len, 1);
4846 crc32cb(crc, crc, tmp0);
4847 br(Assembler::GT, CRC_by1_loop);
4848 b(L_exit);
4849
4850 BIND(CRC_by128_pre);
4851 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4852 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
4853 mov(crc, 0);
4854 crc32cx(crc, crc, tmp0);
4855 crc32cx(crc, crc, tmp1);
4856
4857 cbnz(len, CRC_less128);
4858
4859 BIND(L_exit);
4860 }
4861
4862 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
4863 Register len, Register tmp0, Register tmp1, Register tmp2,
4864 Register tmp3) {
4865 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4866 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4867
4868 subs(len, len, 128);
4869 br(Assembler::GE, CRC_by64_pre);
4870 BIND(CRC_less64);
4871 adds(len, len, 128-32);
4872 br(Assembler::GE, CRC_by32_loop);
4873 BIND(CRC_less32);
4874 adds(len, len, 32-4);
4875 br(Assembler::GE, CRC_by4_loop);
4876 adds(len, len, 4);
4877 br(Assembler::GT, CRC_by1_loop);
4878 b(L_exit);
4879
4880 BIND(CRC_by32_loop);
4881 ldp(tmp0, tmp1, Address(post(buf, 16)));
4882 subs(len, len, 32);
4883 crc32cx(crc, crc, tmp0);
4884 ldr(tmp2, Address(post(buf, 8)));
4885 crc32cx(crc, crc, tmp1);
4886 ldr(tmp3, Address(post(buf, 8)));
4887 crc32cx(crc, crc, tmp2);
4888 crc32cx(crc, crc, tmp3);
4889 br(Assembler::GE, CRC_by32_loop);
4890 cmn(len, (u1)32);
4891 br(Assembler::NE, CRC_less32);
4892 b(L_exit);
4893
4894 BIND(CRC_by4_loop);
4895 ldrw(tmp0, Address(post(buf, 4)));
4896 subs(len, len, 4);
4897 crc32cw(crc, crc, tmp0);
4898 br(Assembler::GE, CRC_by4_loop);
4899 adds(len, len, 4);
4900 br(Assembler::LE, L_exit);
4901 BIND(CRC_by1_loop);
4902 ldrb(tmp0, Address(post(buf, 1)));
4903 subs(len, len, 1);
4904 crc32cb(crc, crc, tmp0);
4905 br(Assembler::GT, CRC_by1_loop);
4906 b(L_exit);
4907
4908 BIND(CRC_by64_pre);
4909 sub(buf, buf, 8);
4910 ldp(tmp0, tmp1, Address(buf, 8));
4911 crc32cx(crc, crc, tmp0);
4912 ldr(tmp2, Address(buf, 24));
4913 crc32cx(crc, crc, tmp1);
4914 ldr(tmp3, Address(buf, 32));
4915 crc32cx(crc, crc, tmp2);
4916 ldr(tmp0, Address(buf, 40));
4917 crc32cx(crc, crc, tmp3);
4918 ldr(tmp1, Address(buf, 48));
4919 crc32cx(crc, crc, tmp0);
4920 ldr(tmp2, Address(buf, 56));
4921 crc32cx(crc, crc, tmp1);
4922 ldr(tmp3, Address(pre(buf, 64)));
4923
4924 b(CRC_by64_loop);
4925
4926 align(CodeEntryAlignment);
4927 BIND(CRC_by64_loop);
4928 subs(len, len, 64);
4929 crc32cx(crc, crc, tmp2);
4930 ldr(tmp0, Address(buf, 8));
4931 crc32cx(crc, crc, tmp3);
4932 ldr(tmp1, Address(buf, 16));
4933 crc32cx(crc, crc, tmp0);
4934 ldr(tmp2, Address(buf, 24));
4935 crc32cx(crc, crc, tmp1);
4936 ldr(tmp3, Address(buf, 32));
4937 crc32cx(crc, crc, tmp2);
4938 ldr(tmp0, Address(buf, 40));
4939 crc32cx(crc, crc, tmp3);
4940 ldr(tmp1, Address(buf, 48));
4941 crc32cx(crc, crc, tmp0);
4942 ldr(tmp2, Address(buf, 56));
4943 crc32cx(crc, crc, tmp1);
4944 ldr(tmp3, Address(pre(buf, 64)));
4945 br(Assembler::GE, CRC_by64_loop);
4946
4947 // post-loop
4948 crc32cx(crc, crc, tmp2);
4949 crc32cx(crc, crc, tmp3);
4950
4951 sub(len, len, 64);
4952 add(buf, buf, 8);
4953 cmn(len, (u1)128);
4954 br(Assembler::NE, CRC_less64);
4955 BIND(L_exit);
4956 }
4957
4958 /**
4959 * @param crc register containing existing CRC (32-bit)
4960 * @param buf register pointing to input byte buffer (byte*)
4961 * @param len register containing number of bytes
* @param table0, table1, table2, table3 scratch registers passed to the selected implementation
* @param tmp, tmp2, tmp3 additional scratch registers
4964 */
4965 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
4966 Register table0, Register table1, Register table2, Register table3,
4967 Register tmp, Register tmp2, Register tmp3) {
4968 if (UseCryptoPmullForCRC32) {
4969 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4970 } else {
4971 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
4972 }
4973 }
4974
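// Common 128-byte-per-iteration folding loop shared by the CRC32 and CRC32C
// pmull implementations. Folding constants are loaded from the CRC table at
// the given table_offset; 128 bytes of data are kept in v0..v7 and folded with
// pmull/pmull2 and eor3, then reduced to 128 bits. The result is returned in
// tmp0 (low 64 bits) and tmp1 (high 64 bits); buf and len are updated so the
// caller can process the remaining tail.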
4975 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
4976 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
4977 Label CRC_by128_loop;
4978 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4979
4980 sub(len, len, 256);
4981 Register table = tmp0;
4982 {
4983 uint64_t offset;
4984 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4985 add(table, table, offset);
4986 }
4987 add(table, table, table_offset);
4988
4989 // Registers v0..v7 are used as data registers.
4990 // Registers v16..v31 are used as tmp registers.
4991 sub(buf, buf, 0x10);
4992 ldrq(v0, Address(buf, 0x10));
4993 ldrq(v1, Address(buf, 0x20));
4994 ldrq(v2, Address(buf, 0x30));
4995 ldrq(v3, Address(buf, 0x40));
4996 ldrq(v4, Address(buf, 0x50));
4997 ldrq(v5, Address(buf, 0x60));
4998 ldrq(v6, Address(buf, 0x70));
4999 ldrq(v7, Address(pre(buf, 0x80)));
5000
5001 movi(v31, T4S, 0);
5002 mov(v31, S, 0, crc);
5003 eor(v0, T16B, v0, v31);
5004
5005 // Register v16 contains constants from the crc table.
5006 ldrq(v16, Address(table));
5007 b(CRC_by128_loop);
5008
5009 align(OptoLoopAlignment);
5010 BIND(CRC_by128_loop);
5011 pmull (v17, T1Q, v0, v16, T1D);
5012 pmull2(v18, T1Q, v0, v16, T2D);
5013 ldrq(v0, Address(buf, 0x10));
5014 eor3(v0, T16B, v17, v18, v0);
5015
5016 pmull (v19, T1Q, v1, v16, T1D);
5017 pmull2(v20, T1Q, v1, v16, T2D);
5018 ldrq(v1, Address(buf, 0x20));
5019 eor3(v1, T16B, v19, v20, v1);
5020
5021 pmull (v21, T1Q, v2, v16, T1D);
5022 pmull2(v22, T1Q, v2, v16, T2D);
5023 ldrq(v2, Address(buf, 0x30));
5024 eor3(v2, T16B, v21, v22, v2);
5025
5026 pmull (v23, T1Q, v3, v16, T1D);
5027 pmull2(v24, T1Q, v3, v16, T2D);
5028 ldrq(v3, Address(buf, 0x40));
5029 eor3(v3, T16B, v23, v24, v3);
5030
5031 pmull (v25, T1Q, v4, v16, T1D);
5032 pmull2(v26, T1Q, v4, v16, T2D);
5033 ldrq(v4, Address(buf, 0x50));
5034 eor3(v4, T16B, v25, v26, v4);
5035
5036 pmull (v27, T1Q, v5, v16, T1D);
5037 pmull2(v28, T1Q, v5, v16, T2D);
5038 ldrq(v5, Address(buf, 0x60));
5039 eor3(v5, T16B, v27, v28, v5);
5040
5041 pmull (v29, T1Q, v6, v16, T1D);
5042 pmull2(v30, T1Q, v6, v16, T2D);
5043 ldrq(v6, Address(buf, 0x70));
5044 eor3(v6, T16B, v29, v30, v6);
5045
5046 // Reuse registers v23, v24.
5047 // Using them won't block the first instruction of the next iteration.
5048 pmull (v23, T1Q, v7, v16, T1D);
5049 pmull2(v24, T1Q, v7, v16, T2D);
5050 ldrq(v7, Address(pre(buf, 0x80)));
5051 eor3(v7, T16B, v23, v24, v7);
5052
5053 subs(len, len, 0x80);
5054 br(Assembler::GE, CRC_by128_loop);
5055
5056 // fold into 512 bits
5057 // Use v31 for constants because v16 can be still in use.
5058 ldrq(v31, Address(table, 0x10));
5059
5060 pmull (v17, T1Q, v0, v31, T1D);
5061 pmull2(v18, T1Q, v0, v31, T2D);
5062 eor3(v0, T16B, v17, v18, v4);
5063
5064 pmull (v19, T1Q, v1, v31, T1D);
5065 pmull2(v20, T1Q, v1, v31, T2D);
5066 eor3(v1, T16B, v19, v20, v5);
5067
5068 pmull (v21, T1Q, v2, v31, T1D);
5069 pmull2(v22, T1Q, v2, v31, T2D);
5070 eor3(v2, T16B, v21, v22, v6);
5071
5072 pmull (v23, T1Q, v3, v31, T1D);
5073 pmull2(v24, T1Q, v3, v31, T2D);
5074 eor3(v3, T16B, v23, v24, v7);
5075
5076 // fold into 128 bits
5077 // Use v17 for constants because v31 can be still in use.
5078 ldrq(v17, Address(table, 0x20));
5079 pmull (v25, T1Q, v0, v17, T1D);
5080 pmull2(v26, T1Q, v0, v17, T2D);
5081 eor3(v3, T16B, v3, v25, v26);
5082
5083 // Use v18 for constants because v17 can be still in use.
5084 ldrq(v18, Address(table, 0x30));
5085 pmull (v27, T1Q, v1, v18, T1D);
5086 pmull2(v28, T1Q, v1, v18, T2D);
5087 eor3(v3, T16B, v3, v27, v28);
5088
5089 // Use v19 for constants because v18 can be still in use.
5090 ldrq(v19, Address(table, 0x40));
5091 pmull (v29, T1Q, v2, v19, T1D);
5092 pmull2(v30, T1Q, v2, v19, T2D);
5093 eor3(v0, T16B, v3, v29, v30);
5094
5095 add(len, len, 0x80);
5096 add(buf, buf, 0x10);
5097
5098 mov(tmp0, v0, D, 0);
5099 mov(tmp1, v0, D, 1);
5100 }
5101
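// Add the 32-bit constant src to the word at memory location dst.
// Clobbers rscratch1 and rscratch2.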
5102 void MacroAssembler::addptr(const Address &dst, int32_t src) {
5103 Address adr;
5104 switch(dst.getMode()) {
5105 case Address::base_plus_offset:
5106 // This is the expected mode, although we allow all the other
5107 // forms below.
5108 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
5109 break;
5110 default:
5111 lea(rscratch2, dst);
5112 adr = Address(rscratch2);
5113 break;
5114 }
5115 ldr(rscratch1, adr);
5116 add(rscratch1, rscratch1, src);
5117 str(rscratch1, adr);
5118 }
5119
5120 void MacroAssembler::cmpptr(Register src1, Address src2) {
5121 uint64_t offset;
5122 adrp(rscratch1, src2, offset);
5123 ldr(rscratch1, Address(rscratch1, offset));
5124 cmp(src1, rscratch1);
5125 }
5126
5127 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
5128 cmp(obj1, obj2);
5129 }
5130
5131 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5132 load_method_holder(rresult, rmethod);
5133 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5134 }
5135
5136 void MacroAssembler::load_method_holder(Register holder, Register method) {
5137 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
5138 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5139 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5140 }
5141
5142 void MacroAssembler::load_metadata(Register dst, Register src) {
5143 if (UseCompactObjectHeaders) {
5144 load_narrow_klass_compact(dst, src);
5145 } else if (UseCompressedClassPointers) {
5146 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5147 } else {
5148 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5149 }
5150 }
5151
// Loads the obj's narrow Klass (from the compact object header's mark word) into dst.
5153 // Preserves all registers (incl src, rscratch1 and rscratch2).
5154 // Input:
5155 // src - the oop we want to load the klass from.
5156 // dst - output narrow klass.
5157 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5158 assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
5159 ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5160 lsr(dst, dst, markWord::klass_shift);
5161 }
5162
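// Load the Klass* of src into dst, decoding the narrow klass when compact
// object headers or compressed class pointers are in use.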
5163 void MacroAssembler::load_klass(Register dst, Register src) {
5164 if (UseCompactObjectHeaders) {
5165 load_narrow_klass_compact(dst, src);
5166 decode_klass_not_null(dst);
5167 } else if (UseCompressedClassPointers) {
5168 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5169 decode_klass_not_null(dst);
5170 } else {
5171 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5172 }
5173 }
5174
5175 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
5176 if (RestoreMXCSROnJNICalls) {
5177 Label OK;
5178 get_fpcr(tmp1);
5179 mov(tmp2, tmp1);
5180 // Set FPCR to the state we need. We do want Round to Nearest. We
5181 // don't want non-IEEE rounding modes or floating-point traps.
5182 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
5183 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12)
5184 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ
5185 eor(tmp2, tmp1, tmp2);
5186 cbz(tmp2, OK); // Only reset FPCR if it's wrong
5187 set_fpcr(tmp1);
5188 bind(OK);
5189 }
5190 }
5191
5192 // ((OopHandle)result).resolve();
5193 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
5194 // OopHandle::resolve is an indirection.
5195 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
5196 }
5197
5198 // ((WeakHandle)result).resolve();
5199 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
5200 assert_different_registers(result, tmp1, tmp2);
5201 Label resolved;
5202
5203 // A null weak handle resolves to null.
5204 cbz(result, resolved);
5205
5206 // Only 64 bit platforms support GCs that require a tmp register
5207 // WeakHandle::resolve is an indirection like jweak.
5208 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5209 result, Address(result), tmp1, tmp2);
5210 bind(resolved);
5211 }
5212
5213 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
5214 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5215 ldr(dst, Address(rmethod, Method::const_offset()));
5216 ldr(dst, Address(dst, ConstMethod::constants_offset()));
5217 ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
5218 ldr(dst, Address(dst, mirror_offset));
5219 resolve_oop_handle(dst, tmp1, tmp2);
5220 }
5221
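// Compare the klass of obj against the Klass* held in the klass register and
// set the condition flags. When the compressed class pointer encoding permits,
// the comparison avoids a full decode (comparing against the shifted narrow
// value, or just the low 32 bits); otherwise the narrow klass is decoded into
// tmp first.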
5222 void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
5223 assert_different_registers(obj, klass, tmp);
5224 if (UseCompressedClassPointers) {
5225 if (UseCompactObjectHeaders) {
5226 load_narrow_klass_compact(tmp, obj);
5227 } else {
5228 ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
5229 }
5230 if (CompressedKlassPointers::base() == nullptr) {
5231 cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
5232 return;
5233 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
5234 && CompressedKlassPointers::shift() == 0) {
5235 // Only the bottom 32 bits matter
5236 cmpw(klass, tmp);
5237 return;
5238 }
5239 decode_klass_not_null(tmp);
5240 } else {
5241 ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
5242 }
5243 cmp(klass, tmp);
5244 }
5245
5246 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5247 if (UseCompactObjectHeaders) {
5248 load_narrow_klass_compact(tmp1, obj1);
5249 load_narrow_klass_compact(tmp2, obj2);
5250 cmpw(tmp1, tmp2);
5251 } else if (UseCompressedClassPointers) {
5252 ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5253 ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5254 cmpw(tmp1, tmp2);
5255 } else {
5256 ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5257 ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5258 cmp(tmp1, tmp2);
5259 }
5260 }
5261
5262 void MacroAssembler::load_prototype_header(Register dst, Register src) {
5263 load_klass(dst, src);
5264 ldr(dst, Address(dst, Klass::prototype_header_offset()));
5265 }
5266
5267 void MacroAssembler::store_klass(Register dst, Register src) {
// FIXME: Should this be a store release? Concurrent GCs assume the
// klass length is valid if the klass field is not null.
5270 assert(!UseCompactObjectHeaders, "not with compact headers");
5271 if (UseCompressedClassPointers) {
5272 encode_klass_not_null(src);
5273 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5274 } else {
5275 str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5276 }
5277 }
5278
5279 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5280 assert(!UseCompactObjectHeaders, "not with compact headers");
5281 if (UseCompressedClassPointers) {
5282 // Store to klass gap in destination
5283 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
5284 }
5285 }
5286
5287 // Algorithm must match CompressedOops::encode.
5288 void MacroAssembler::encode_heap_oop(Register d, Register s) {
5289 #ifdef ASSERT
5290 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5291 #endif
5292 verify_oop_msg(s, "broken oop in encode_heap_oop");
5293 if (CompressedOops::base() == nullptr) {
5294 if (CompressedOops::shift() != 0) {
5295 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5296 lsr(d, s, LogMinObjAlignmentInBytes);
5297 } else {
5298 mov(d, s);
5299 }
5300 } else {
5301 subs(d, s, rheapbase);
5302 csel(d, d, zr, Assembler::HS);
5303 lsr(d, d, LogMinObjAlignmentInBytes);
5304
5305 /* Old algorithm: is this any worse?
5306 Label nonnull;
5307 cbnz(r, nonnull);
5308 sub(r, r, rheapbase);
5309 bind(nonnull);
5310 lsr(r, r, LogMinObjAlignmentInBytes);
5311 */
5312 }
5313 }
5314
5315 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5316 #ifdef ASSERT
5317 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5318 if (CheckCompressedOops) {
5319 Label ok;
5320 cbnz(r, ok);
5321 stop("null oop passed to encode_heap_oop_not_null");
5322 bind(ok);
5323 }
5324 #endif
5325 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5326 if (CompressedOops::base() != nullptr) {
5327 sub(r, r, rheapbase);
5328 }
5329 if (CompressedOops::shift() != 0) {
5330 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5331 lsr(r, r, LogMinObjAlignmentInBytes);
5332 }
5333 }
5334
5335 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5336 #ifdef ASSERT
5337 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5338 if (CheckCompressedOops) {
5339 Label ok;
5340 cbnz(src, ok);
5341 stop("null oop passed to encode_heap_oop_not_null2");
5342 bind(ok);
5343 }
5344 #endif
5345 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5346
5347 Register data = src;
5348 if (CompressedOops::base() != nullptr) {
5349 sub(dst, src, rheapbase);
5350 data = dst;
5351 }
5352 if (CompressedOops::shift() != 0) {
5353 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5354 lsr(dst, data, LogMinObjAlignmentInBytes);
5355 data = dst;
5356 }
5357 if (data == src)
5358 mov(dst, src);
5359 }
5360
5361 void MacroAssembler::decode_heap_oop(Register d, Register s) {
5362 #ifdef ASSERT
5363 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5364 #endif
5365 if (CompressedOops::base() == nullptr) {
5366 if (CompressedOops::shift() != 0) {
5367 lsl(d, s, CompressedOops::shift());
5368 } else if (d != s) {
5369 mov(d, s);
5370 }
5371 } else {
5372 Label done;
5373 if (d != s)
5374 mov(d, s);
5375 cbz(s, done);
5376 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
5377 bind(done);
5378 }
5379 verify_oop_msg(d, "broken oop in decode_heap_oop");
5380 }
5381
5382 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5383 assert (UseCompressedOops, "should only be used for compressed headers");
5384 assert (Universe::heap() != nullptr, "java heap should be initialized");
5385 // Cannot assert, unverified entry point counts instructions (see .ad file)
5386 // vtableStubs also counts instructions in pd_code_size_limit.
5387 // Also do not verify_oop as this is called by verify_oop.
5388 if (CompressedOops::shift() != 0) {
5389 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5390 if (CompressedOops::base() != nullptr) {
5391 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5392 } else {
5393 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5394 }
5395 } else {
5396 assert (CompressedOops::base() == nullptr, "sanity");
5397 }
5398 }
5399
5400 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5401 assert (UseCompressedOops, "should only be used for compressed headers");
5402 assert (Universe::heap() != nullptr, "java heap should be initialized");
5403 // Cannot assert, unverified entry point counts instructions (see .ad file)
5404 // vtableStubs also counts instructions in pd_code_size_limit.
5405 // Also do not verify_oop as this is called by verify_oop.
5406 if (CompressedOops::shift() != 0) {
5407 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5408 if (CompressedOops::base() != nullptr) {
5409 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5410 } else {
5411 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5412 }
5413 } else {
5414 assert (CompressedOops::base() == nullptr, "sanity");
5415 if (dst != src) {
5416 mov(dst, src);
5417 }
5418 }
5419 }
5420
5421 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
5422
5423 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
5424 assert(Metaspace::initialized(), "metaspace not initialized yet");
5425 assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
5426 return _klass_decode_mode;
5427 }
5428
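// Compute the klass decode mode usable for the given encoding base, shift and
// narrow-klass range: KlassDecodeZero when the base is null, KlassDecodeXor
// when the base is a valid logical immediate and its low bits (covering the
// klass range) are all zero, KlassDecodeMovk when the shifted base only has
// bits in positions 32..47, and KlassDecodeNone otherwise.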
5429 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
5430 assert(UseCompressedClassPointers, "not using compressed class pointers");
5431
5432 // KlassDecodeMode shouldn't be set already.
5433 assert(_klass_decode_mode == KlassDecodeNone, "set once");
5434
5435 if (base == nullptr) {
5436 return KlassDecodeZero;
5437 }
5438
5439 if (operand_valid_for_logical_immediate(
5440 /*is32*/false, (uint64_t)base)) {
5441 const uint64_t range_mask = right_n_bits(log2i_ceil(range));
5442 if (((uint64_t)base & range_mask) == 0) {
5443 return KlassDecodeXor;
5444 }
5445 }
5446
5447 const uint64_t shifted_base =
5448 (uint64_t)base >> shift;
5449 if ((shifted_base & 0xffff0000ffffffff) == 0) {
5450 return KlassDecodeMovk;
5451 }
5452
5453 // No valid encoding.
5454 return KlassDecodeNone;
5455 }
5456
5457 // Check if one of the above decoding modes will work for given base, shift and range.
5458 bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
5459 return klass_decode_mode(base, shift, range) != KlassDecodeNone;
5460 }
5461
5462 bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
5463 _klass_decode_mode = klass_decode_mode(base, shift, range);
5464 return _klass_decode_mode != KlassDecodeNone;
5465 }
5466
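// Pick a temporary register from {r0, r1, r2} that is distinct from both dst
// and src.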
5467 static Register pick_different_tmp(Register dst, Register src) {
5468 auto tmps = RegSet::of(r0, r1, r2) - RegSet::of(src, dst);
5469 return *tmps.begin();
5470 }
5471
5472 void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) {
// We have to load the klass base from the AOT constants area, but not
// the shift, because the shift is not allowed to change.
5475 int shift = CompressedKlassPointers::shift();
5476 assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
5477 if (dst != src) {
// We can load the base into dst, subtract it from the src and shift down.
5479 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5480 ldr(dst, dst);
5481 sub(dst, src, dst);
5482 lsr(dst, dst, shift);
5483 } else {
// We need an extra register in order to load the compressed klass base.
5485 Register tmp = pick_different_tmp(dst, src);
5486 RegSet regs = RegSet::of(tmp);
5487 push(regs, sp);
5488 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5489 ldr(tmp, tmp);
5490 sub(dst, src, tmp);
5491 lsr(dst, dst, shift);
5492 pop(regs, sp);
5493 }
5494 }
5495
5496 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5497 if (AOTCodeCache::is_on_for_dump()) {
5498 encode_klass_not_null_for_aot(dst, src);
5499 return;
5500 }
5501
5502 switch (klass_decode_mode()) {
5503 case KlassDecodeZero:
5504 if (CompressedKlassPointers::shift() != 0) {
5505 lsr(dst, src, CompressedKlassPointers::shift());
5506 } else {
5507 if (dst != src) mov(dst, src);
5508 }
5509 break;
5510
5511 case KlassDecodeXor:
5512 if (CompressedKlassPointers::shift() != 0) {
5513 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5514 lsr(dst, dst, CompressedKlassPointers::shift());
5515 } else {
5516 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5517 }
5518 break;
5519
5520 case KlassDecodeMovk:
5521 if (CompressedKlassPointers::shift() != 0) {
5522 ubfx(dst, src, CompressedKlassPointers::shift(), 32);
5523 } else {
5524 movw(dst, src);
5525 }
5526 break;
5527
5528 case KlassDecodeNone:
5529 ShouldNotReachHere();
5530 break;
5531 }
5532 }
5533
5534 void MacroAssembler::encode_klass_not_null(Register r) {
5535 encode_klass_not_null(r, r);
5536 }
5537
5538 void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) {
// We have to load the klass base from the AOT constants area, but not
// the shift, because the shift is not allowed to change.
5541 int shift = CompressedKlassPointers::shift();
5542 assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
5543 if (dst != src) {
// We can load the base into dst, then add the shifted narrow klass value.
5545 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5546 ldr(dst, dst);
5547 add(dst, dst, src, LSL, shift);
5548 } else {
// We need an extra register in order to load the compressed klass base.
5550 Register tmp = pick_different_tmp(dst, src);
5551 RegSet regs = RegSet::of(tmp);
5552 push(regs, sp);
5553 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5554 ldr(tmp, tmp);
5555 add(dst, tmp, src, LSL, shift);
5556 pop(regs, sp);
5557 }
5558 }
5559
5560 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5561 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5562
5563 if (AOTCodeCache::is_on_for_dump()) {
5564 decode_klass_not_null_for_aot(dst, src);
5565 return;
5566 }
5567
5568 switch (klass_decode_mode()) {
5569 case KlassDecodeZero:
5570 if (CompressedKlassPointers::shift() != 0) {
5571 lsl(dst, src, CompressedKlassPointers::shift());
5572 } else {
5573 if (dst != src) mov(dst, src);
5574 }
5575 break;
5576
5577 case KlassDecodeXor:
5578 if (CompressedKlassPointers::shift() != 0) {
5579 lsl(dst, src, CompressedKlassPointers::shift());
5580 eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5581 } else {
5582 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5583 }
5584 break;
5585
5586 case KlassDecodeMovk: {
5587 const uint64_t shifted_base =
5588 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
5589
5590 if (dst != src) movw(dst, src);
5591 movk(dst, shifted_base >> 32, 32);
5592
5593 if (CompressedKlassPointers::shift() != 0) {
5594 lsl(dst, dst, CompressedKlassPointers::shift());
5595 }
5596
5597 break;
5598 }
5599
5600 case KlassDecodeNone:
5601 ShouldNotReachHere();
5602 break;
5603 }
5604 }
5605
5606 void MacroAssembler::decode_klass_not_null(Register r) {
5607 decode_klass_not_null(r, r);
5608 }
5609
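// Materialize a narrow oop constant for obj into dst. The movz/movk pair below
// encodes the placeholder value 0xDEADBEEF; the recorded oop relocation lets
// the real narrow oop value be patched in later.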
5610 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5611 #ifdef ASSERT
5612 {
5613 ThreadInVMfromUnknown tiv;
5614 assert (UseCompressedOops, "should only be used for compressed oops");
5615 assert (Universe::heap() != nullptr, "java heap should be initialized");
5616 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5617 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5618 }
5619 #endif
5620 int oop_index = oop_recorder()->find_index(obj);
5621 InstructionMark im(this);
5622 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5623 code_section()->relocate(inst_mark(), rspec);
5624 movz(dst, 0xDEAD, 16);
5625 movk(dst, 0xBEEF);
5626 }
5627
5628 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5629 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5630 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5631 int index = oop_recorder()->find_index(k);
5632 assert(! Universe::heap()->is_in(k), "should not be an oop");
5633
5634 InstructionMark im(this);
5635 RelocationHolder rspec = metadata_Relocation::spec(index);
5636 code_section()->relocate(inst_mark(), rspec);
5637 narrowKlass nk = CompressedKlassPointers::encode(k);
5638 movz(dst, (nk >> 16), 16);
5639 movk(dst, nk & 0xffff);
5640 }
5641
5642 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
5643 Register dst, Address src,
5644 Register tmp1, Register tmp2) {
5645 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5646 decorators = AccessInternal::decorator_fixup(decorators, type);
5647 bool as_raw = (decorators & AS_RAW) != 0;
5648 if (as_raw) {
5649 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
5650 } else {
5651 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
5652 }
5653 }
5654
5655 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
5656 Address dst, Register val,
5657 Register tmp1, Register tmp2, Register tmp3) {
5658 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5659 decorators = AccessInternal::decorator_fixup(decorators, type);
5660 bool as_raw = (decorators & AS_RAW) != 0;
5661 if (as_raw) {
5662 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5663 } else {
5664 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5665 }
5666 }
5667
5668 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5669 Register inline_layout_info) {
5670 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5671 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5672 }
5673
5674 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5675 ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
5676 ldrw(offset, Address(offset, InlineKlass::payload_offset_offset()));
5677 }
5678
5679 void MacroAssembler::payload_address(Register oop, Register data, Register inline_klass) {
5680 // ((address) (void*) o) + vk->payload_offset();
5681 Register offset = (data == oop) ? rscratch1 : data;
5682 payload_offset(inline_klass, offset);
5683 if (data == oop) {
5684 add(data, data, offset);
5685 } else {
5686 lea(data, Address(oop, offset));
5687 }
5688 }
5689
5690 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5691 Register index, Register data) {
5692 assert_different_registers(array, array_klass, index);
5693 assert_different_registers(rscratch1, array, index);
5694
5695 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5696 ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset()));
5697
5698 // Klass::layout_helper_log2_element_size(lh)
5699 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5700 lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift);
5701 andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask);
5702 lslv(index, index, rscratch1);
5703
5704 add(data, array, index);
5705 add(data, data, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT));
5706 }
5707
5708 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5709 Register tmp2, DecoratorSet decorators) {
5710 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
5711 }
5712
5713 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5714 Register tmp2, DecoratorSet decorators) {
5715 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
5716 }
5717
5718 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5719 Register tmp2, Register tmp3, DecoratorSet decorators) {
5720 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5721 }
5722
5723 // Used for storing nulls.
5724 void MacroAssembler::store_heap_oop_null(Address dst) {
5725 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5726 }
5727
5728 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
5729 assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
5730 int index = oop_recorder()->allocate_metadata_index(obj);
5731 RelocationHolder rspec = metadata_Relocation::spec(index);
5732 return Address((address)obj, rspec);
5733 }
5734
5735 // Move an oop into a register.
5736 void MacroAssembler::movoop(Register dst, jobject obj) {
5737 int oop_index;
5738 if (obj == nullptr) {
5739 oop_index = oop_recorder()->allocate_oop_index(obj);
5740 } else {
5741 #ifdef ASSERT
5742 {
5743 ThreadInVMfromUnknown tiv;
5744 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5745 }
5746 #endif
5747 oop_index = oop_recorder()->find_index(obj);
5748 }
5749 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5750
5751 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
5752 mov(dst, Address((address)obj, rspec));
5753 } else {
5754 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
5755 ldr(dst, Address(dummy, rspec));
5756 }
5757 }
5758
5759 // Move a metadata address into a register.
5760 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
5761 int oop_index;
5762 if (obj == nullptr) {
5763 oop_index = oop_recorder()->allocate_metadata_index(obj);
5764 } else {
5765 oop_index = oop_recorder()->find_index(obj);
5766 }
5767 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
5768 mov(dst, Address((address)obj, rspec));
5769 }
5770
5771 Address MacroAssembler::constant_oop_address(jobject obj) {
5772 #ifdef ASSERT
5773 {
5774 ThreadInVMfromUnknown tiv;
5775 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5776 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
5777 }
5778 #endif
5779 int oop_index = oop_recorder()->find_index(obj);
5780 return Address((address)obj, oop_Relocation::spec(oop_index));
5781 }
5782
5783 // Object / value buffer allocation...
5784 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
5785 Register t1, Register t2,
5786 bool clear_fields, Label& alloc_failed)
5787 {
5788 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
5789 Register layout_size = t1;
5790 assert(new_obj == r0, "needs to be r0");
5791 assert_different_registers(klass, new_obj, t1, t2);
5792
5793 // get instance_size in InstanceKlass (scaled to a count of bytes)
5794 ldrw(layout_size, Address(klass, Klass::layout_helper_offset()));
5795 // test to see if it is malformed in some way
5796 tst(layout_size, Klass::_lh_instance_slow_path_bit);
5797 br(Assembler::NE, slow_case_no_pop);
5798
5799 // Allocate the instance:
5800 // If TLAB is enabled:
5801 // Try to allocate in the TLAB.
5802 // If fails, go to the slow path.
5803 // Initialize the allocation.
5804 // Exit.
5805 //
5806 // Go to slow path.
5807
5808 if (UseTLAB) {
5809 push(klass);
5810 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
5811 if (ZeroTLAB || (!clear_fields)) {
// the fields have already been cleared
5813 b(initialize_header);
5814 } else {
5815 // initialize both the header and fields
5816 b(initialize_object);
5817 }
5818
5819 if (clear_fields) {
5820 // The object is initialized before the header. If the object size is
5821 // zero, go directly to the header initialization.
5822 bind(initialize_object);
5823 int header_size = oopDesc::header_size() * HeapWordSize;
5824 assert(is_aligned(header_size, BytesPerLong), "oop header size must be 8-byte-aligned");
5825 subs(layout_size, layout_size, header_size);
5826 br(Assembler::EQ, initialize_header);
5827
5828 // Initialize topmost object field, divide size by 8, check if odd and
5829 // test if zero.
5830
5831 #ifdef ASSERT
// make sure instance_size was a multiple of 8
5833 Label L;
5834 tst(layout_size, 7);
5835 br(Assembler::EQ, L);
5836 stop("object size is not multiple of 8 - adjust this code");
5837 bind(L);
5838 // must be > 0, no extra check needed here
5839 #endif
5840
5841 lsr(layout_size, layout_size, LogBytesPerLong);
5842
5843 // initialize remaining object fields: instance_size was a multiple of 8
5844 {
5845 Label loop;
5846 Register base = t2;
5847
5848 bind(loop);
5849 add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong);
5850 str(zr, Address(rscratch1, header_size - 1*oopSize));
5851 subs(layout_size, layout_size, 1);
5852 br(Assembler::NE, loop);
5853 }
5854 } // clear_fields
5855
5856 // initialize object header only.
5857 bind(initialize_header);
5858 pop(klass);
5859 Register mark_word = t2;
5860 if (UseCompactObjectHeaders || EnableValhalla) {
5861 ldr(mark_word, Address(klass, Klass::prototype_header_offset()));
5862 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5863 } else {
5864 mov(mark_word, (intptr_t)markWord::prototype().value());
5865 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5866 }
5867 if (!UseCompactObjectHeaders) {
5868 store_klass_gap(new_obj, zr); // zero klass gap for compressed oops
5869 mov(t2, klass); // preserve klass
5870 store_klass(new_obj, t2); // src klass reg is potentially compressed
5871 }
5872 b(done);
5873 }
5874
5875 if (UseTLAB) {
5876 bind(slow_case);
5877 pop(klass);
5878 }
5879 bind(slow_case_no_pop);
5880 b(alloc_failed);
5881
5882 bind(done);
5883 }
5884
5885 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5886 void MacroAssembler::tlab_allocate(Register obj,
5887 Register var_size_in_bytes,
5888 int con_size_in_bytes,
5889 Register t1,
5890 Register t2,
5891 Label& slow_case) {
5892 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5893 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5894 }
5895
5896 void MacroAssembler::verify_tlab() {
5897 #ifdef ASSERT
5898 if (UseTLAB && VerifyOops) {
5899 Label next, ok;
5900
5901 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5902
5903 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5904 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5905 cmp(rscratch2, rscratch1);
5906 br(Assembler::HS, next);
5907 STOP("assert(top >= start)");
5908 should_not_reach_here();
5909
5910 bind(next);
5911 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5912 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5913 cmp(rscratch2, rscratch1);
5914 br(Assembler::HS, ok);
5915 STOP("assert(top <= end)");
5916 should_not_reach_here();
5917
5918 bind(ok);
5919 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5920 }
5921 #endif
5922 }
5923
5924 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
5925 inline_layout_info(holder_klass, index, inline_klass);
5926 ldr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
5927 }
5928
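// Compute the address of the InlineLayoutInfo entry for field 'index' of
// holder_klass into layout_info: the index is scaled by the size of an
// InlineLayoutInfo element and added to the base of the klass's
// inline_layout_info array. Note that index is clobbered.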
5929 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
5930 assert_different_registers(holder_klass, index, layout_info);
5931 InlineLayoutInfo array[2];
int size = (char*)&array[1] - (char*)&array[0]; // size of one array element, including padding
5933 if (is_power_of_2(size)) {
5934 lsl(index, index, log2i_exact(size)); // Scale index by power of 2
5935 } else {
5936 mov(layout_info, size);
5937 mul(index, index, layout_info); // Scale the index to be the entry index * array_element_size
5938 }
5939 ldr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
5940 add(layout_info, layout_info, Array<InlineLayoutInfo>::base_offset_in_bytes());
5941 lea(layout_info, Address(layout_info, index));
5942 }
5943
// Writes successive stack pages, down past the given size plus the shadow
// pages, to check for a stack overflow. This clobbers tmp.
5946 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5947 assert_different_registers(tmp, size, rscratch1);
5948 mov(tmp, sp);
5949 // Bang stack for total size given plus shadow page size.
5950 // Bang one page at a time because large size can bang beyond yellow and
5951 // red zones.
5952 Label loop;
5953 mov(rscratch1, (int)os::vm_page_size());
5954 bind(loop);
5955 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5956 subsw(size, size, rscratch1);
5957 str(size, Address(tmp));
5958 br(Assembler::GT, loop);
5959
5960 // Bang down shadow pages too.
5961 // At this point, (tmp-0) is the last address touched, so don't
5962 // touch it again. (It was touched as (tmp-pagesize) but then tmp
5963 // was post-decremented.) Skip this address by starting at i=1, and
5964 // touch a few more pages below. N.B. It is important to touch all
5965 // the way down to and including i=StackShadowPages.
5966 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
// This could be any sized move, but since it can serve as a debugging
// crumb, the bigger the better.
5969 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5970 str(size, Address(tmp));
5971 }
5972 }
5973
5974 // Move the address of the polling page into dest.
5975 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
5976 ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
5977 }
5978
5979 // Read the polling page. The address of the polling page must
5980 // already be in r.
5981 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
5982 address mark;
5983 {
5984 InstructionMark im(this);
5985 code_section()->relocate(inst_mark(), rtype);
5986 ldrw(zr, Address(r, 0));
5987 mark = inst_mark();
5988 }
5989 verify_cross_modify_fence_not_required();
5990 return mark;
5991 }
5992
5993 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
5994 relocInfo::relocType rtype = dest.rspec().reloc()->type();
5995 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
5996 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
5997 uint64_t dest_page = (uint64_t)dest.target() >> 12;
5998 int64_t offset_low = dest_page - low_page;
5999 int64_t offset_high = dest_page - high_page;
6000
6001 assert(is_valid_AArch64_address(dest.target()), "bad address");
6002 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
6003
6004 InstructionMark im(this);
6005 code_section()->relocate(inst_mark(), dest.rspec());
6006 // 8143067: Ensure that the adrp can reach the dest from anywhere within
6007 // the code cache so that if it is relocated we know it will still reach
6008 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
6009 _adrp(reg1, dest.target());
6010 } else {
6011 uint64_t target = (uint64_t)dest.target();
6012 uint64_t adrp_target
6013 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
6014
6015 _adrp(reg1, (address)adrp_target);
6016 movk(reg1, target >> 32, 32);
6017 }
6018 byte_offset = (uint64_t)dest.target() & 0xfff;
6019 }
6020
6021 void MacroAssembler::load_byte_map_base(Register reg) {
6022 CardTable::CardValue* byte_map_base =
6023 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
6024
6025 // Strictly speaking the byte_map_base isn't an address at all, and it might
6026 // even be negative. It is thus materialised as a constant.
6027 mov(reg, (uint64_t)byte_map_base);
6028 }
6029
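// Build a stack frame of framesize bytes, including the two words for the
// saved FP/LR pair. Small frames drop SP first and store FP/LR at the top of
// the new frame; larger frames push FP/LR first and then lower SP, via
// rscratch1 when the amount does not fit in an immediate.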
6030 void MacroAssembler::build_frame(int framesize) {
6031 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
6032 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
6033 protect_return_address();
6034 if (framesize < ((1 << 9) + 2 * wordSize)) {
6035 sub(sp, sp, framesize);
6036 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
6037 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
6038 } else {
6039 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
6040 if (PreserveFramePointer) mov(rfp, sp);
6041 if (framesize < ((1 << 12) + 2 * wordSize))
6042 sub(sp, sp, framesize - 2 * wordSize);
6043 else {
6044 mov(rscratch1, framesize - 2 * wordSize);
6045 sub(sp, sp, rscratch1);
6046 }
6047 }
6048 verify_cross_modify_fence_not_required();
6049 }
6050
6051 void MacroAssembler::remove_frame(int framesize) {
6052 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
6053 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
6054 if (framesize < ((1 << 9) + 2 * wordSize)) {
6055 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
6056 add(sp, sp, framesize);
6057 } else {
6058 if (framesize < ((1 << 12) + 2 * wordSize))
6059 add(sp, sp, framesize - 2 * wordSize);
6060 else {
6061 mov(rscratch1, framesize - 2 * wordSize);
6062 add(sp, sp, rscratch1);
6063 }
6064 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6065 }
6066 authenticate_return_address();
6067 }
6068
6069 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6070 if (needs_stack_repair) {
6071 // Remove the extension of the caller's frame used for inline type unpacking
6072 //
6073 // Right now the stack looks like this:
6074 //
6075 // | Arguments from caller |
6076 // |---------------------------| <-- caller's SP
6077 // | Saved LR #1 |
6078 // | Saved FP #1 |
6079 // |---------------------------|
6080 // | Extension space for |
6081 // | inline arg (un)packing |
6082 // |---------------------------| <-- start of this method's frame
6083 // | Saved LR #2 |
6084 // | Saved FP #2 |
6085 // |---------------------------| <-- FP
6086 // | sp_inc |
6087 // | method locals |
6088 // |---------------------------| <-- SP
6089 //
6090 // There are two copies of FP and LR on the stack. They will be identical at
6091 // first, but that can change.
6092 // If the caller has been deoptimized, LR #1 will be patched to point at the
6093 // deopt blob, and LR #2 will still point into the old method.
6094 // If the saved FP (x29) was not used as the frame pointer, but to store an
6095 // oop, the GC will be aware only of FP #2 as the spilled location of x29 and
6096 // will fix only this one.
6097 //
6098 // When restoring, one must then load FP #2 into x29, and LR #1 into x30,
6099 // while keeping in mind that from the scalarized entry point, there will be
6100 // only one copy of each.
6101 //
6102 // The sp_inc stack slot holds the total size of the frame including the
6103 // extension space minus two words for the saved FP and LR. That is how to
6104 // find LR #1. FP #2 is always located just after sp_inc.
6105
6106 int sp_inc_offset = initial_framesize - 3 * wordSize; // Immediately below saved LR and FP
6107
6108 ldr(rscratch1, Address(sp, sp_inc_offset));
6109 ldr(rfp, Address(sp, sp_inc_offset + wordSize));
6110 add(sp, sp, rscratch1);
6111 ldr(lr, Address(sp, wordSize));
6112 add(sp, sp, 2 * wordSize);
6113 } else {
6114 remove_frame(initial_framesize);
6115 }
6116 }
6117
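// Record the stack increment for a frame that may need stack repair: store the
// real frame size minus the two words for the saved FP/LR into the sp_inc slot
// so that remove_frame(..., needs_stack_repair) can restore the caller's SP.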
6118 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
6119 int real_frame_size = frame_size + sp_inc;
6120 assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
6121 assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
6122 assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6123
6124 int sp_inc_offset = frame_size - 3 * wordSize; // Immediately below saved LR and FP
6125
6126 // Subtract two words for the saved FP and LR as these will be popped
6127 // separately. See remove_frame above.
6128 mov(rscratch1, real_frame_size - 2*wordSize);
6129 str(rscratch1, Address(sp, sp_inc_offset));
6130 }
6131
// This method counts leading positive bytes (highest bit not set) in the provided byte array.
6133 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
// The simple and most common case (a small, aligned array that is not at the
// end of a memory page) is handled here. All other cases go to the stub.
6136 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
6137 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
6138 assert_different_registers(ary1, len, result);
6139
6140 mov(result, len);
6141 cmpw(len, 0);
6142 br(LE, DONE);
6143 cmpw(len, 4 * wordSize);
br(GE, STUB_LONG); // if size >= 32, go to the long stub
6145
6146 int shift = 64 - exact_log2(os::vm_page_size());
6147 lsl(rscratch1, ary1, shift);
6148 mov(rscratch2, (size_t)(4 * wordSize) << shift);
6149 adds(rscratch2, rscratch1, rscratch2); // At end of page?
br(CS, STUB); // if at the end of the page, go to the stub
6151 subs(len, len, wordSize);
6152 br(LT, END);
6153
6154 BIND(LOOP);
6155 ldr(rscratch1, Address(post(ary1, wordSize)));
6156 tst(rscratch1, UPPER_BIT_MASK);
6157 br(NE, SET_RESULT);
6158 subs(len, len, wordSize);
6159 br(GE, LOOP);
6160 cmpw(len, -wordSize);
6161 br(EQ, DONE);
6162
6163 BIND(END);
6164 ldr(rscratch1, Address(ary1));
6165 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
6166 lslv(rscratch1, rscratch1, rscratch2);
6167 tst(rscratch1, UPPER_BIT_MASK);
6168 br(NE, SET_RESULT);
6169 b(DONE);
6170
6171 BIND(STUB);
6172 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
6173 assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
6174 address tpc1 = trampoline_call(count_pos);
6175 if (tpc1 == nullptr) {
6176 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
6177 postcond(pc() == badAddress);
6178 return nullptr;
6179 }
6180 b(DONE);
6181
6182 BIND(STUB_LONG);
6183 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
6184 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
6185 address tpc2 = trampoline_call(count_pos_long);
6186 if (tpc2 == nullptr) {
6187 DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
6188 postcond(pc() == badAddress);
6189 return nullptr;
6190 }
6191 b(DONE);
6192
6193 BIND(SET_RESULT);
6194
6195 add(len, len, wordSize);
6196 sub(result, result, len);
6197
6198 BIND(DONE);
6199 postcond(pc() != badAddress);
6200 return pc();
6201 }
6202
6203 // Clobbers: rscratch1, rscratch2, rflags
6204 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
6205 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
6206 Register tmp4, Register tmp5, Register result,
6207 Register cnt1, int elem_size) {
6208 Label DONE, SAME;
6209 Register tmp1 = rscratch1;
6210 Register tmp2 = rscratch2;
6211 int elem_per_word = wordSize/elem_size;
6212 int log_elem_size = exact_log2(elem_size);
6213 int klass_offset = arrayOopDesc::klass_offset_in_bytes();
6214 int length_offset = arrayOopDesc::length_offset_in_bytes();
6215 int base_offset
6216 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
// When the length offset is not aligned to 8 bytes, we align it down.
// This is valid because the new offset will be the klass offset, which is
// the same for type arrays.
6221 int start_offset = align_down(length_offset, BytesPerWord);
6222 int extra_length = base_offset - start_offset;
6223 assert(start_offset == length_offset || start_offset == klass_offset,
6224 "start offset must be 8-byte-aligned or be the klass offset");
6225 assert(base_offset != start_offset, "must include the length field");
6226 extra_length = extra_length / elem_size; // We count in elements, not bytes.
6227 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
6228
6229 assert(elem_size == 1 || elem_size == 2, "must be char or byte");
6230 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6231
6232 #ifndef PRODUCT
6233 {
6234 const char kind = (elem_size == 2) ? 'U' : 'L';
6235 char comment[64];
6236 os::snprintf_checked(comment, sizeof comment, "array_equals%c{", kind);
6237 BLOCK_COMMENT(comment);
6238 }
6239 #endif
6240
6241 // if (a1 == a2)
6242 // return true;
6243 cmpoop(a1, a2); // May have read barriers for a1 and a2.
6244 br(EQ, SAME);
6245
6246 if (UseSimpleArrayEquals) {
6247 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
6248 // if (a1 == nullptr || a2 == nullptr)
6249 // return false;
// (a1 & a2) == 0 means that at least one pointer is null (or, very rarely,
// that two non-null pointers share no set bits), so we can save one branch
// in most cases.
6253 tst(a1, a2);
6254 mov(result, false);
6255 br(EQ, A_MIGHT_BE_NULL);
6256 // if (a1.length != a2.length)
6257 // return false;
6258 bind(A_IS_NOT_NULL);
6259 ldrw(cnt1, Address(a1, length_offset));
6260 // Increase loop counter by diff between base- and actual start-offset.
6261 addw(cnt1, cnt1, extra_length);
6262 lea(a1, Address(a1, start_offset));
6263 lea(a2, Address(a2, start_offset));
6264 // Check for short strings, i.e. smaller than wordSize.
6265 subs(cnt1, cnt1, elem_per_word);
6266 br(Assembler::LT, SHORT);
6267 // Main 8 byte comparison loop.
6268 bind(NEXT_WORD); {
6269 ldr(tmp1, Address(post(a1, wordSize)));
6270 ldr(tmp2, Address(post(a2, wordSize)));
6271 subs(cnt1, cnt1, elem_per_word);
6272 eor(tmp5, tmp1, tmp2);
6273 cbnz(tmp5, DONE);
6274 } br(GT, NEXT_WORD);
6275 // Last longword. In the case where length == 4 we compare the
6276 // same longword twice, but that's still faster than another
6277 // conditional branch.
6278 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6279 // length == 4.
6280 if (log_elem_size > 0)
6281 lsl(cnt1, cnt1, log_elem_size);
6282 ldr(tmp3, Address(a1, cnt1));
6283 ldr(tmp4, Address(a2, cnt1));
6284 eor(tmp5, tmp3, tmp4);
6285 cbnz(tmp5, DONE);
6286 b(SAME);
6287 bind(A_MIGHT_BE_NULL);
6288 // in case both a1 and a2 are not-null, proceed with loads
6289 cbz(a1, DONE);
6290 cbz(a2, DONE);
6291 b(A_IS_NOT_NULL);
6292 bind(SHORT);
6293
6294 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
6295 {
6296 ldrw(tmp1, Address(post(a1, 4)));
6297 ldrw(tmp2, Address(post(a2, 4)));
6298 eorw(tmp5, tmp1, tmp2);
6299 cbnzw(tmp5, DONE);
6300 }
6301 bind(TAIL03);
6302 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
6303 {
6304 ldrh(tmp3, Address(post(a1, 2)));
6305 ldrh(tmp4, Address(post(a2, 2)));
6306 eorw(tmp5, tmp3, tmp4);
6307 cbnzw(tmp5, DONE);
6308 }
6309 bind(TAIL01);
6310 if (elem_size == 1) { // Only needed when comparing byte arrays.
6311 tbz(cnt1, 0, SAME); // 0-1 bytes left.
6312 {
6313 ldrb(tmp1, a1);
6314 ldrb(tmp2, a2);
6315 eorw(tmp5, tmp1, tmp2);
6316 cbnzw(tmp5, DONE);
6317 }
6318 }
6319 } else {
6320 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
6321 CSET_EQ, LAST_CHECK;
6322 mov(result, false);
6323 cbz(a1, DONE);
6324 ldrw(cnt1, Address(a1, length_offset));
6325 cbz(a2, DONE);
6326 // Increase loop counter by diff between base- and actual start-offset.
6327 addw(cnt1, cnt1, extra_length);
6328
// On most CPUs a2 is still "locked" (surprisingly) in ldrw, and it's
// faster to perform another branch before comparing a1 and a2.
6331 cmp(cnt1, (u1)elem_per_word);
6332 br(LE, SHORT); // short or same
6333 ldr(tmp3, Address(pre(a1, start_offset)));
6334 subs(zr, cnt1, stubBytesThreshold);
6335 br(GE, STUB);
6336 ldr(tmp4, Address(pre(a2, start_offset)));
6337 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6338
6339 // Main 16 byte comparison loop with 2 exits
6340 bind(NEXT_DWORD); {
6341 ldr(tmp1, Address(pre(a1, wordSize)));
6342 ldr(tmp2, Address(pre(a2, wordSize)));
6343 subs(cnt1, cnt1, 2 * elem_per_word);
6344 br(LE, TAIL);
6345 eor(tmp4, tmp3, tmp4);
6346 cbnz(tmp4, DONE);
6347 ldr(tmp3, Address(pre(a1, wordSize)));
6348 ldr(tmp4, Address(pre(a2, wordSize)));
6349 cmp(cnt1, (u1)elem_per_word);
6350 br(LE, TAIL2);
6351 cmp(tmp1, tmp2);
6352 } br(EQ, NEXT_DWORD);
6353 b(DONE);
6354
6355 bind(TAIL);
6356 eor(tmp4, tmp3, tmp4);
6357 eor(tmp2, tmp1, tmp2);
6358 lslv(tmp2, tmp2, tmp5);
6359 orr(tmp5, tmp4, tmp2);
6360 cmp(tmp5, zr);
6361 b(CSET_EQ);
6362
6363 bind(TAIL2);
6364 eor(tmp2, tmp1, tmp2);
6365 cbnz(tmp2, DONE);
6366 b(LAST_CHECK);
6367
6368 bind(STUB);
6369 ldr(tmp4, Address(pre(a2, start_offset)));
6370 if (elem_size == 2) { // convert to byte counter
6371 lsl(cnt1, cnt1, 1);
6372 }
6373 eor(tmp5, tmp3, tmp4);
6374 cbnz(tmp5, DONE);
6375 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
6376 assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
6377 address tpc = trampoline_call(stub);
6378 if (tpc == nullptr) {
6379 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
6380 postcond(pc() == badAddress);
6381 return nullptr;
6382 }
6383 b(DONE);
6384
6385 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
6386 // so, if a2 == null => return false(0), else return true, so we can return a2
6387 mov(result, a2);
6388 b(DONE);
6389 bind(SHORT);
6390 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6391 ldr(tmp3, Address(a1, start_offset));
6392 ldr(tmp4, Address(a2, start_offset));
6393 bind(LAST_CHECK);
6394 eor(tmp4, tmp3, tmp4);
6395 lslv(tmp5, tmp4, tmp5);
6396 cmp(tmp5, zr);
6397 bind(CSET_EQ);
6398 cset(result, EQ);
6399 b(DONE);
6400 }
6401
6402 bind(SAME);
6403 mov(result, true);
6404 // That's it.
6405 bind(DONE);
6406
6407 BLOCK_COMMENT("} array_equals");
6408 postcond(pc() != badAddress);
6409 return pc();
6410 }
6411
6412 // Compare Strings
6413
6414 // For Strings we're passed the address of the first characters in a1
6415 // and a2 and the length in cnt1.
6416 // There are two implementations. For arrays >= 8 bytes, all
6417 // comparisons (including the final one, which may overlap) are
6418 // performed 8 bytes at a time. For strings < 8 bytes, we compare a
6419 // halfword, then a short, and then a byte.
6420
6421 void MacroAssembler::string_equals(Register a1, Register a2,
6422 Register result, Register cnt1)
6423 {
6424 Label SAME, DONE, SHORT, NEXT_WORD;
6425 Register tmp1 = rscratch1;
6426 Register tmp2 = rscratch2;
6427 Register cnt2 = tmp2; // cnt2 only used in array length compare
6428
6429 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6430
6431 #ifndef PRODUCT
6432 {
6433 char comment[64];
6434 os::snprintf_checked(comment, sizeof comment, "{string_equalsL");
6435 BLOCK_COMMENT(comment);
6436 }
6437 #endif
6438
6439 mov(result, false);
6440
6441 // Check for short strings, i.e. smaller than wordSize.
6442 subs(cnt1, cnt1, wordSize);
6443 br(Assembler::LT, SHORT);
6444 // Main 8 byte comparison loop.
6445 bind(NEXT_WORD); {
6446 ldr(tmp1, Address(post(a1, wordSize)));
6447 ldr(tmp2, Address(post(a2, wordSize)));
6448 subs(cnt1, cnt1, wordSize);
6449 eor(tmp1, tmp1, tmp2);
6450 cbnz(tmp1, DONE);
6451 } br(GT, NEXT_WORD);
6452 // Last longword. In the case where length == 4 we compare the
6453 // same longword twice, but that's still faster than another
6454 // conditional branch.
6455 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6456 // length == 4.
6457 ldr(tmp1, Address(a1, cnt1));
6458 ldr(tmp2, Address(a2, cnt1));
6459 eor(tmp2, tmp1, tmp2);
6460 cbnz(tmp2, DONE);
6461 b(SAME);
6462
6463 bind(SHORT);
6464 Label TAIL03, TAIL01;
6465
6466 tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
6467 {
6468 ldrw(tmp1, Address(post(a1, 4)));
6469 ldrw(tmp2, Address(post(a2, 4)));
6470 eorw(tmp1, tmp1, tmp2);
6471 cbnzw(tmp1, DONE);
6472 }
6473 bind(TAIL03);
6474 tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
6475 {
6476 ldrh(tmp1, Address(post(a1, 2)));
6477 ldrh(tmp2, Address(post(a2, 2)));
6478 eorw(tmp1, tmp1, tmp2);
6479 cbnzw(tmp1, DONE);
6480 }
6481 bind(TAIL01);
6482 tbz(cnt1, 0, SAME); // 0-1 bytes left.
6483 {
6484 ldrb(tmp1, a1);
6485 ldrb(tmp2, a2);
6486 eorw(tmp1, tmp1, tmp2);
6487 cbnzw(tmp1, DONE);
6488 }
6489 // Arrays are equal.
6490 bind(SAME);
6491 mov(result, true);
6492
6493 // That's it.
6494 bind(DONE);
6495 BLOCK_COMMENT("} string_equals");
6496 }
6497
6498
// The size of the blocks zeroed by the zero_blocks stub. We must
// handle anything smaller than this ourselves in zero_words().
6501 const int MacroAssembler::zero_words_block_size = 8;
6502
6503 // zero_words() is used by C2 ClearArray patterns and by
6504 // C1_MacroAssembler. It is as small as possible, handling small word
6505 // counts locally and delegating anything larger to the zero_blocks
6506 // stub. It is expanded many times in compiled code, so it is
6507 // important to keep it short.
6508
6509 // ptr: Address of a buffer to be zeroed.
6510 // cnt: Count in HeapWords.
6511 //
6512 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
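//
// A minimal sketch of the generated control flow (illustrative only;
// zero_blocks() below stands for the stub, which advances the pointer and
// reduces the count before returning):
//
//    void zero_words(HeapWord *ptr, size_t cnt) {
//      if (cnt >= zero_words_block_size)
//        zero_blocks(&ptr, &cnt);      // far or trampoline call to the stub
//      // 0 .. zero_words_block_size-1 words remain; they are zeroed by a short
//      // ladder of stp/str instructions selected by the low bits of cnt (tbz).
//      while (cnt-- > 0) *ptr++ = 0;
//    }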
6513 address MacroAssembler::zero_words(Register ptr, Register cnt)
6514 {
6515 assert(is_power_of_2(zero_words_block_size), "adjust this");
6516
6517 BLOCK_COMMENT("zero_words {");
6518 assert(ptr == r10 && cnt == r11, "mismatch in register usage");
6519 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
6520 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
6521
6522 subs(rscratch1, cnt, zero_words_block_size);
6523 Label around;
6524 br(LO, around);
6525 {
6528 // Make sure this is a C2 compilation. C1 allocates space only for
6529 // trampoline stubs generated by Call LIR ops, and in any case it
6530 // makes sense for a C1 compilation task to proceed as quickly as
6531 // possible.
6532 CompileTask* task;
6533 if (StubRoutines::aarch64::complete()
6534 && Thread::current()->is_Compiler_thread()
6535 && (task = ciEnv::current()->task())
6536 && is_c2_compile(task->comp_level())) {
6537 address tpc = trampoline_call(zero_blocks);
6538 if (tpc == nullptr) {
6539 DEBUG_ONLY(reset_labels(around));
6540 return nullptr;
6541 }
6542 } else {
6543 far_call(zero_blocks);
6544 }
6545 }
6546 bind(around);
6547
6548 // We have a few words left to do. zero_blocks has adjusted r10 and r11
6549 // for us.
6550 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
6551 Label l;
6552 tbz(cnt, exact_log2(i), l);
6553 for (int j = 0; j < i; j += 2) {
6554 stp(zr, zr, post(ptr, 2 * BytesPerWord));
6555 }
6556 bind(l);
6557 }
6558 {
6559 Label l;
6560 tbz(cnt, 0, l);
6561 str(zr, Address(ptr));
6562 bind(l);
6563 }
6564
6565 BLOCK_COMMENT("} zero_words");
6566 return pc();
6567 }
6568
6569 // base: Address of a buffer to be zeroed, 8 bytes aligned.
6570 // cnt: Immediate count in HeapWords.
6571 //
6572 // r10, r11, rscratch1, and rscratch2 are clobbered.
6573 address MacroAssembler::zero_words(Register base, uint64_t cnt)
6574 {
6575 assert(wordSize <= BlockZeroingLowLimit,
6576 "increase BlockZeroingLowLimit");
6577 address result = nullptr;
6578 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
6579 #ifndef PRODUCT
6580 {
6581 char buf[64];
6582 os::snprintf_checked(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
6583 BLOCK_COMMENT(buf);
6584 }
6585 #endif
6586 if (cnt >= 16) {
6587 uint64_t loops = cnt/16;
6588 if (loops > 1) {
6589 mov(rscratch2, loops - 1);
6590 }
6591 {
6592 Label loop;
6593 bind(loop);
6594 for (int i = 0; i < 16; i += 2) {
6595 stp(zr, zr, Address(base, i * BytesPerWord));
6596 }
6597 add(base, base, 16 * BytesPerWord);
6598 if (loops > 1) {
6599 subs(rscratch2, rscratch2, 1);
6600 br(GE, loop);
6601 }
6602 }
6603 }
6604 cnt %= 16;
6605 int i = cnt & 1; // store any odd word to start
6606 if (i) str(zr, Address(base));
6607 for (; i < (int)cnt; i += 2) {
6608 stp(zr, zr, Address(base, i * wordSize));
6609 }
6610 BLOCK_COMMENT("} zero_words");
6611 result = pc();
6612 } else {
6613 mov(r10, base); mov(r11, cnt);
6614 result = zero_words(r10, r11);
6615 }
6616 return result;
6617 }
6618
6619 // Zero blocks of memory by using DC ZVA.
6620 //
6621 // Aligns the base address first sufficiently for DC ZVA, then uses
6622 // DC ZVA repeatedly for every full block. cnt is the size to be
6623 // zeroed in HeapWords. Returns the count of words left to be zeroed
6624 // in cnt.
6625 //
6626 // NOTE: This is intended to be used in the zero_blocks() stub. If
6627 // you want to use it elsewhere, note that cnt must be >= 2*zva_length.
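//
// Roughly, assuming dc_zva() stands for the DC ZVA instruction (sketch only):
//
//    void zero_dcache_blocks(uint8_t *base, size_t &cnt /* HeapWords */) {
//      if ((uintptr_t)base & 0x0f) return;              // caller handles unaligned buffers
//      size_t pad = -(uintptr_t)base & (zva_length - 1);
//      memset(base, 0, pad);                            // emitted as a jump into a table of stp
//      base += pad; cnt -= pad >> 3;
//      while (cnt >= (size_t)(zva_length >> 3)) {       // one cache block per iteration
//        dc_zva(base);
//        base += zva_length; cnt -= zva_length >> 3;
//      }
//      // cnt words are left for the caller to zero.
//    }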
6628 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
6629 Register tmp = rscratch1;
6630 Register tmp2 = rscratch2;
6631 int zva_length = VM_Version::zva_length();
6632 Label initial_table_end, loop_zva;
6633 Label fini;
6634
  // Base must be 16-byte aligned. If not, just return and let the caller handle it.
6636 tst(base, 0x0f);
6637 br(Assembler::NE, fini);
6638 // Align base with ZVA length.
6639 neg(tmp, base);
6640 andr(tmp, tmp, zva_length - 1);
6641
6642 // tmp: the number of bytes to be filled to align the base with ZVA length.
6643 add(base, base, tmp);
6644 sub(cnt, cnt, tmp, Assembler::ASR, 3);
6645 adr(tmp2, initial_table_end);
6646 sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
6647 br(tmp2);
6648
6649 for (int i = -zva_length + 16; i < 0; i += 16)
6650 stp(zr, zr, Address(base, i));
6651 bind(initial_table_end);
6652
6653 sub(cnt, cnt, zva_length >> 3);
6654 bind(loop_zva);
6655 dc(Assembler::ZVA, base);
6656 subs(cnt, cnt, zva_length >> 3);
6657 add(base, base, zva_length);
6658 br(Assembler::GE, loop_zva);
6659 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
6660 bind(fini);
6661 }
6662
6663 // base: Address of a buffer to be filled, 8 bytes aligned.
6664 // cnt: Count in 8-byte unit.
6665 // value: Value to be filled with.
6666 // base will point to the end of the buffer after filling.
6667 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
6668 {
6669 // Algorithm:
6670 //
6671 // if (cnt == 0) {
6672 // return;
6673 // }
6674 // if ((p & 8) != 0) {
6675 // *p++ = v;
6676 // }
6677 //
6678 // scratch1 = cnt & 14;
6679 // cnt -= scratch1;
6680 // p += scratch1;
6681 // switch (scratch1 / 2) {
6682 // do {
6683 // cnt -= 16;
6684 // p[-16] = v;
6685 // p[-15] = v;
6686 // case 7:
6687 // p[-14] = v;
6688 // p[-13] = v;
6689 // case 6:
6690 // p[-12] = v;
6691 // p[-11] = v;
6692 // // ...
6693 // case 1:
6694 // p[-2] = v;
6695 // p[-1] = v;
6696 // case 0:
6697 // p += 16;
6698 // } while (cnt);
6699 // }
6700 // if ((cnt & 1) == 1) {
6701 // *p++ = v;
6702 // }
6703
6704 assert_different_registers(base, cnt, value, rscratch1, rscratch2);
6705
6706 Label fini, skip, entry, loop;
6707 const int unroll = 8; // Number of stp instructions we'll unroll
6708
6709 cbz(cnt, fini);
6710 tbz(base, 3, skip);
6711 str(value, Address(post(base, 8)));
6712 sub(cnt, cnt, 1);
6713 bind(skip);
6714
6715 andr(rscratch1, cnt, (unroll-1) * 2);
6716 sub(cnt, cnt, rscratch1);
6717 add(base, base, rscratch1, Assembler::LSL, 3);
6718 adr(rscratch2, entry);
6719 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
6720 br(rscratch2);
6721
6722 bind(loop);
6723 add(base, base, unroll * 16);
6724 for (int i = -unroll; i < 0; i++)
6725 stp(value, value, Address(base, i * 16));
6726 bind(entry);
6727 subs(cnt, cnt, unroll * 2);
6728 br(Assembler::GE, loop);
6729
6730 tbz(cnt, 0, fini);
6731 str(value, Address(post(base, 8)));
6732 bind(fini);
6733 }
6734
6735 // Intrinsic for
6736 //
6737 // - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6738 // Encodes char[] to byte[] in ISO-8859-1
6739 //
6740 // - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6741 // Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
6742 //
6743 // - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6744 // Encodes char[] to byte[] in ASCII
6745 //
6746 // This version always returns the number of characters copied, and does not
6747 // clobber the 'len' register. A successful copy will complete with the post-
6748 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
6749 // post-condition: 0 <= 'res' < 'len'.
6750 //
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
//       degrade performance (on Ampere Altra - Neoverse N1) beyond what is
//       acceptable, even though the footprint would be smaller. Using 'umaxv'
//       in the ASCII case comes with a small penalty but avoids additional
//       bloat.
6756 //
6757 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags
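//
// The scalar equivalent of the per-character check is simply (a sketch; the
// real code vectorizes it 32 and then 8 characters at a time):
//
//    int encode(const jchar *src, jbyte *dst, int len, bool ascii) {
//      for (int i = 0; i < len; i++) {
//        jchar c = src[i];
//        if (c & (ascii ? 0xff80 : 0xff00)) return i;   // first char that doesn't fit
//        dst[i] = (jbyte)c;
//      }
//      return len;
//    }
//
// The vector loops implement the same test by splitting each 16-bit element
// into low and high bytes with uzp1/uzp2, requiring all high bytes to be zero
// and, in the ASCII case, all low bytes to be non-negative.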
6758 void MacroAssembler::encode_iso_array(Register src, Register dst,
6759 Register len, Register res, bool ascii,
6760 FloatRegister vtmp0, FloatRegister vtmp1,
6761 FloatRegister vtmp2, FloatRegister vtmp3,
6762 FloatRegister vtmp4, FloatRegister vtmp5)
6763 {
6764 Register cnt = res;
6765 Register max = rscratch1;
6766 Register chk = rscratch2;
6767
6768 prfm(Address(src), PLDL1STRM);
6769 movw(cnt, len);
6770
6771 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
6772
6773 Label LOOP_32, DONE_32, FAIL_32;
6774
6775 BIND(LOOP_32);
6776 {
6777 cmpw(cnt, 32);
6778 br(LT, DONE_32);
6779 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
6780 // Extract lower bytes.
6781 FloatRegister vlo0 = vtmp4;
6782 FloatRegister vlo1 = vtmp5;
6783 uzp1(vlo0, T16B, vtmp0, vtmp1);
6784 uzp1(vlo1, T16B, vtmp2, vtmp3);
6785 // Merge bits...
6786 orr(vtmp0, T16B, vtmp0, vtmp1);
6787 orr(vtmp2, T16B, vtmp2, vtmp3);
6788 // Extract merged upper bytes.
6789 FloatRegister vhix = vtmp0;
6790 uzp2(vhix, T16B, vtmp0, vtmp2);
6791 // ISO-check on hi-parts (all zero).
6792 // ASCII-check on lo-parts (no sign).
6793 FloatRegister vlox = vtmp1; // Merge lower bytes.
6794 ASCII(orr(vlox, T16B, vlo0, vlo1));
6795 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox));
6796 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox));
6797 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0));
6798 ASCII(orr(chk, chk, max));
6799 cbnz(chk, FAIL_32);
6800 subw(cnt, cnt, 32);
6801 st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
6802 b(LOOP_32);
6803 }
6804 BIND(FAIL_32);
6805 sub(src, src, 64);
6806 BIND(DONE_32);
6807
6808 Label LOOP_8, SKIP_8;
6809
6810 BIND(LOOP_8);
6811 {
6812 cmpw(cnt, 8);
6813 br(LT, SKIP_8);
6814 FloatRegister vhi = vtmp0;
6815 FloatRegister vlo = vtmp1;
6816 ld1(vtmp3, T8H, src);
6817 uzp1(vlo, T16B, vtmp3, vtmp3);
6818 uzp2(vhi, T16B, vtmp3, vtmp3);
6819 // ISO-check on hi-parts (all zero).
6820 // ASCII-check on lo-parts (no sign).
6821 ASCII(cm(LT, vtmp2, T16B, vlo));
6822 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2));
6823 ASCII(umov(max, vtmp2, B, 0));
6824 ASCII(orr(chk, chk, max));
6825 cbnz(chk, SKIP_8);
6826
6827 strd(vlo, Address(post(dst, 8)));
6828 subw(cnt, cnt, 8);
6829 add(src, src, 16);
6830 b(LOOP_8);
6831 }
6832 BIND(SKIP_8);
6833
6834 #undef ASCII
6835
6836 Label LOOP, DONE;
6837
6838 cbz(cnt, DONE);
6839 BIND(LOOP);
6840 {
6841 Register chr = rscratch1;
6842 ldrh(chr, Address(post(src, 2)));
6843 tst(chr, ascii ? 0xff80 : 0xff00);
6844 br(NE, DONE);
6845 strb(chr, Address(post(dst, 1)));
6846 subs(cnt, cnt, 1);
6847 br(GT, LOOP);
6848 }
6849 BIND(DONE);
6850 // Return index where we stopped.
6851 subw(res, len, cnt);
6852 }
6853
6854 // Inflate byte[] array to char[].
6855 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6
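// Conceptually, a scalar sketch of what the generated code computes:
//
//    for (int i = 0; i < len; i++) dst[i] = (jchar)(src[i] & 0xff);
//
// The emitted code widens 8 bytes per iteration by interleaving them with a
// zero vector (zip1), and hands long arrays to the large_byte_array_inflate
// stub when software prefetching is enabled.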
6856 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
6857 FloatRegister vtmp1, FloatRegister vtmp2,
6858 FloatRegister vtmp3, Register tmp4) {
6859 Label big, done, after_init, to_stub;
6860
6861 assert_different_registers(src, dst, len, tmp4, rscratch1);
6862
6863 fmovd(vtmp1, 0.0);
6864 lsrw(tmp4, len, 3);
6865 bind(after_init);
6866 cbnzw(tmp4, big);
6867 // Short string: less than 8 bytes.
6868 {
6869 Label loop, tiny;
6870
6871 cmpw(len, 4);
6872 br(LT, tiny);
6873 // Use SIMD to do 4 bytes.
6874 ldrs(vtmp2, post(src, 4));
6875 zip1(vtmp3, T8B, vtmp2, vtmp1);
6876 subw(len, len, 4);
6877 strd(vtmp3, post(dst, 8));
6878
6879 cbzw(len, done);
6880
    // Do the remaining bytes one at a time.
6882 bind(loop);
6883 ldrb(tmp4, post(src, 1));
6884 strh(tmp4, post(dst, 2));
6885 subw(len, len, 1);
6886
6887 bind(tiny);
6888 cbnz(len, loop);
6889
6890 b(done);
6891 }
6892
6893 if (SoftwarePrefetchHintDistance >= 0) {
6894 bind(to_stub);
6895 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
6896 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
6897 address tpc = trampoline_call(stub);
6898 if (tpc == nullptr) {
6899 DEBUG_ONLY(reset_labels(big, done));
6900 postcond(pc() == badAddress);
6901 return nullptr;
6902 }
6903 b(after_init);
6904 }
6905
6906 // Unpack the bytes 8 at a time.
6907 bind(big);
6908 {
6909 Label loop, around, loop_last, loop_start;
6910
6911 if (SoftwarePrefetchHintDistance >= 0) {
6912 const int large_loop_threshold = (64 + 16)/8;
6913 ldrd(vtmp2, post(src, 8));
6914 andw(len, len, 7);
6915 cmp(tmp4, (u1)large_loop_threshold);
6916 br(GE, to_stub);
6917 b(loop_start);
6918
6919 bind(loop);
6920 ldrd(vtmp2, post(src, 8));
6921 bind(loop_start);
6922 subs(tmp4, tmp4, 1);
6923 br(EQ, loop_last);
6924 zip1(vtmp2, T16B, vtmp2, vtmp1);
6925 ldrd(vtmp3, post(src, 8));
6926 st1(vtmp2, T8H, post(dst, 16));
6927 subs(tmp4, tmp4, 1);
6928 zip1(vtmp3, T16B, vtmp3, vtmp1);
6929 st1(vtmp3, T8H, post(dst, 16));
6930 br(NE, loop);
6931 b(around);
6932 bind(loop_last);
6933 zip1(vtmp2, T16B, vtmp2, vtmp1);
6934 st1(vtmp2, T8H, post(dst, 16));
6935 bind(around);
6936 cbz(len, done);
6937 } else {
6938 andw(len, len, 7);
6939 bind(loop);
6940 ldrd(vtmp2, post(src, 8));
6941 sub(tmp4, tmp4, 1);
6942 zip1(vtmp3, T16B, vtmp2, vtmp1);
6943 st1(vtmp3, T8H, post(dst, 16));
6944 cbnz(tmp4, loop);
6945 }
6946 }
6947
6948 // Do the tail of up to 8 bytes.
6949 add(src, src, len);
6950 ldrd(vtmp3, Address(src, -8));
6951 add(dst, dst, len, ext::uxtw, 1);
6952 zip1(vtmp3, T16B, vtmp3, vtmp1);
6953 strq(vtmp3, Address(dst, -16));
6954
6955 bind(done);
6956 postcond(pc() != badAddress);
6957 return pc();
6958 }
6959
6960 // Compress char[] array to byte[].
6961 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Returns (in 'res') the array length if every element in the array can be
// encoded, otherwise the index of the first non-latin1 (> 0xff) character.
6964 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
6965 Register res,
6966 FloatRegister tmp0, FloatRegister tmp1,
6967 FloatRegister tmp2, FloatRegister tmp3,
6968 FloatRegister tmp4, FloatRegister tmp5) {
6969 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
6970 }
6971
// java.lang.Math.round(double a)
6973 // Returns the closest long to the argument, with ties rounding to
6974 // positive infinity. This requires some fiddling for corner
6975 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
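//
// A sketch of the logic; fcvtas()/fcvtms() stand for the corresponding
// convert instructions, and the bit-twiddled comparison that also routes NaN
// and infinities down the first path is elided:
//
//    jlong round(double a) {
//      if (a >= 0.0 || fabs(a) >= 0x1.0p52)
//        return fcvtas(a);        // round to nearest, ties away from zero
//      // Small negative value: Java ties round towards +infinity, so add 0.5
//      // and convert rounding towards -infinity.
//      return fcvtms(a + 0.5);
//    }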
6976 void MacroAssembler::java_round_double(Register dst, FloatRegister src,
6977 FloatRegister ftmp) {
6978 Label DONE;
6979 BLOCK_COMMENT("java_round_double: { ");
6980 fmovd(rscratch1, src);
6981 // Use RoundToNearestTiesAway unless src small and -ve.
6982 fcvtasd(dst, src);
6983 // Test if src >= 0 || abs(src) >= 0x1.0p52
6984 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
6985 mov(rscratch2, julong_cast(0x1.0p52));
6986 cmp(rscratch1, rscratch2);
6987 br(HS, DONE); {
6988 // src < 0 && abs(src) < 0x1.0p52
6989 // src may have a fractional part, so add 0.5
6990 fmovd(ftmp, 0.5);
6991 faddd(ftmp, src, ftmp);
6992 // Convert double to jlong, use RoundTowardsNegative
6993 fcvtmsd(dst, ftmp);
6994 }
6995 bind(DONE);
6996 BLOCK_COMMENT("} java_round_double");
6997 }
6998
6999 void MacroAssembler::java_round_float(Register dst, FloatRegister src,
7000 FloatRegister ftmp) {
7001 Label DONE;
7002 BLOCK_COMMENT("java_round_float: { ");
7003 fmovs(rscratch1, src);
7004 // Use RoundToNearestTiesAway unless src small and -ve.
7005 fcvtassw(dst, src);
7006 // Test if src >= 0 || abs(src) >= 0x1.0p23
7007 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
7008 mov(rscratch2, jint_cast(0x1.0p23f));
7009 cmp(rscratch1, rscratch2);
7010 br(HS, DONE); {
7011 // src < 0 && |src| < 0x1.0p23
7012 // src may have a fractional part, so add 0.5
7013 fmovs(ftmp, 0.5f);
7014 fadds(ftmp, src, ftmp);
7015 // Convert float to jint, use RoundTowardsNegative
7016 fcvtmssw(dst, ftmp);
7017 }
7018 bind(DONE);
7019 BLOCK_COMMENT("} java_round_float");
7020 }
7021
7022 // get_thread() can be called anywhere inside generated code so we
7023 // need to save whatever non-callee save context might get clobbered
7024 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
7025 // the call setup code.
7026 //
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems it is an ordinary C function, so we must preserve every
// register a C call may clobber.
7029 //
7030 void MacroAssembler::get_thread(Register dst) {
7031 RegSet saved_regs =
7032 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
7033 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
7034
7035 protect_return_address();
7036 push(saved_regs, sp);
7037
7038 mov(lr, ExternalAddress(CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)));
7039 blr(lr);
7040 if (dst != c_rarg0) {
7041 mov(dst, c_rarg0);
7042 }
7043
7044 pop(saved_regs, sp);
7045 authenticate_return_address();
7046 }
7047
7048 #ifdef COMPILER2
7049 // C2 compiled method's prolog code
// Moved here from aarch64.ad to support Valhalla code below
7051 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
7052 if (C->clinit_barrier_on_entry()) {
7053 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
7054
7055 Label L_skip_barrier;
7056
7057 mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
7058 clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
7059 far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
7060 bind(L_skip_barrier);
7061 }
7062
7063 if (C->max_vector_size() > 0) {
7064 reinitialize_ptrue();
7065 }
7066
7067 int bangsize = C->output()->bang_size_in_bytes();
7068 if (C->output()->need_stack_bang(bangsize))
7069 generate_stack_overflow_check(bangsize);
7070
7071 // n.b. frame size includes space for return pc and rfp
7072 const long framesize = C->output()->frame_size_in_bytes();
7073 build_frame(framesize);
7074
7075 if (C->needs_stack_repair()) {
7076 save_stack_increment(sp_inc, framesize);
7077 }
7078
7079 if (VerifyStackAtCalls) {
7080 Unimplemented();
7081 }
7082 }
7083 #endif // COMPILER2
7084
7085 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "inline types should be returned as fields");
7087 // An inline type might be returned. If fields are in registers we
7088 // need to allocate an inline type instance and initialize it with
7089 // the value of the fields.
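  //
  // Rough outline of what follows (sketch only):
  //
  //    if ((r0 & 1) == 0) return;                 // r0 already holds a buffered oop
  //    klass = (vk != nullptr) ? vk : (r0 & ~1);  // C1 knows the type statically,
  //                                               // the interpreter passes it tagged in r0
  //    obj = TLAB allocation, or a runtime call if that fails;
  //    initialize the header of obj, call the klass' pack handler to copy the
  //    field registers into obj, and return obj in r0.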
7090 Label skip;
  // We only need to allocate a new buffered inline type if one was not already returned
7092 tbz(r0, 0, skip);
7093 int call_offset = -1;
7094
  // Be careful not to clobber r1-r7, which hold the returned fields.
  // Also do not use callee-saved registers, as these may be live in the interpreter.
7097 Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
7098
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. the object size is never zero and is sometimes a compile-time constant, and storing
  // the klass pointer after allocation is unnecessary if vk != nullptr. allocate_instance
  // is not aware of these.
7102 Label slow_case;
7103 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it when allocation fails
7105
7106 if (vk != nullptr) {
7107 // Called from C1, where the return type is statically known.
7108 movptr(klass, (intptr_t)vk->get_InlineKlass());
7109 jint lh = vk->layout_helper();
7110 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
7111 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
7112 tlab_allocate(r0, noreg, lh, tmp1, tmp2, slow_case);
7113 } else {
7114 b(slow_case);
7115 }
7116 } else {
    // Called from the interpreter. r0 contains ((the InlineKlass* of the return type) | 0x01)
7118 andr(klass, r0, -2);
7119 if (UseTLAB) {
7120 ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
7121 tst(tmp2, Klass::_lh_instance_slow_path_bit);
7122 br(Assembler::NE, slow_case);
7123 tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
7124 } else {
7125 b(slow_case);
7126 }
7127 }
7128 if (UseTLAB) {
7129 // 2. Initialize buffered inline instance header
7130 Register buffer_obj = r0;
7131 if (UseCompactObjectHeaders) {
7132 ldr(rscratch1, Address(klass, Klass::prototype_header_offset()));
7133 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
7134 } else {
7135 mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
7136 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
7137 store_klass_gap(buffer_obj, zr);
7138 if (vk == nullptr) {
7139 // store_klass corrupts klass, so save it for later use (interpreter case only).
7140 mov(tmp1, klass);
7141 }
7142 store_klass(buffer_obj, klass);
7143 klass = tmp1;
7144 }
7145 // 3. Initialize its fields with an inline class specific handler
7146 if (vk != nullptr) {
7147 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
7148 } else {
7149 ldr(tmp1, Address(klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
7150 ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
7151 blr(tmp1);
7152 }
7153
7154 membar(Assembler::StoreStore);
7155 b(skip);
7156 } else {
7157 // Must have already branched to slow_case above.
7158 DEBUG_ONLY(should_not_reach_here());
7159 }
7160 bind(slow_case);
7161 // We failed to allocate a new inline type, fall back to a runtime
7162 // call. Some oop field may be live in some registers but we can't
7163 // tell. That runtime call will take care of preserving them
7164 // across a GC if there's one.
7165 mov(r0, r0_preserved);
7166
7167 if (from_interpreter) {
7168 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
7169 } else {
7170 far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
7171 call_offset = offset();
7172 }
7173 membar(Assembler::StoreStore);
7174
7175 bind(skip);
7176 return call_offset;
7177 }
7178
7179 // Move a value between registers/stack slots and update the reg_state
7180 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
7181 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
7182 if (reg_state[to->value()] == reg_written) {
7183 return true; // Already written
7184 }
7185
7186 if (from != to && bt != T_VOID) {
7187 if (reg_state[to->value()] == reg_readonly) {
7188 return false; // Not yet writable
7189 }
7190 if (from->is_reg()) {
7191 if (to->is_reg()) {
7192 if (from->is_Register() && to->is_Register()) {
7193 mov(to->as_Register(), from->as_Register());
7194 } else if (from->is_FloatRegister() && to->is_FloatRegister()) {
7195 fmovd(to->as_FloatRegister(), from->as_FloatRegister());
7196 } else {
7197 ShouldNotReachHere();
7198 }
7199 } else {
7200 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
7201 Address to_addr = Address(sp, st_off);
7202 if (from->is_FloatRegister()) {
7203 if (bt == T_DOUBLE) {
7204 strd(from->as_FloatRegister(), to_addr);
7205 } else {
7206 assert(bt == T_FLOAT, "must be float");
7207 strs(from->as_FloatRegister(), to_addr);
7208 }
7209 } else {
7210 str(from->as_Register(), to_addr);
7211 }
7212 }
7213 } else {
7214 Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size);
7215 if (to->is_reg()) {
7216 if (to->is_FloatRegister()) {
7217 if (bt == T_DOUBLE) {
7218 ldrd(to->as_FloatRegister(), from_addr);
7219 } else {
7220 assert(bt == T_FLOAT, "must be float");
7221 ldrs(to->as_FloatRegister(), from_addr);
7222 }
7223 } else {
7224 ldr(to->as_Register(), from_addr);
7225 }
7226 } else {
7227 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
7228 ldr(rscratch1, from_addr);
7229 str(rscratch1, Address(sp, st_off));
7230 }
7231 }
7232 }
7233
7234 // Update register states
7235 reg_state[from->value()] = reg_writable;
7236 reg_state[to->value()] = reg_written;
7237 return true;
7238 }
7239
7240 // Calculate the extra stack space required for packing or unpacking inline
7241 // args and adjust the stack pointer
7242 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
7243 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
7244 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
7245 assert(sp_inc > 0, "sanity");
7246
7247 // Save a copy of the FP and LR here for deoptimization patching and frame walking
7248 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7249
7250 // Adjust the stack pointer. This will be repaired on return by MacroAssembler::remove_frame
7251 if (sp_inc < (1 << 9)) {
7252 sub(sp, sp, sp_inc); // Fits in an immediate
7253 } else {
7254 mov(rscratch1, sp_inc);
7255 sub(sp, sp, rscratch1);
7256 }
7257
7258 return sp_inc + 2 * wordSize; // Account for the FP/LR space
7259 }
7260
7261 // Read all fields from an inline type oop and store the values in registers/stack slots
7262 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
7263 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
7264 RegState reg_state[]) {
7265 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
7266 assert(from->is_valid(), "source must be valid");
7267 bool progress = false;
7268 #ifdef ASSERT
7269 const int start_offset = offset();
7270 #endif
7271
7272 Label L_null, L_notNull;
7273 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
7274 // TODO 8366717 We need to make sure that r14 (and potentially other long-life regs) are kept live in slowpath runtime calls in GC barriers
7275 Register tmp1 = r10;
7276 Register tmp2 = r11;
7277 Register fromReg = noreg;
7278 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
7279 bool done = true;
7280 bool mark_done = true;
7281 VMReg toReg;
7282 BasicType bt;
7283 // Check if argument requires a null check
7284 bool null_check = false;
7285 VMReg nullCheckReg;
7286 while (stream.next(nullCheckReg, bt)) {
7287 if (sig->at(stream.sig_index())._offset == -1) {
7288 null_check = true;
7289 break;
7290 }
7291 }
7292 stream.reset(sig_index, to_index);
7293 while (stream.next(toReg, bt)) {
7294 assert(toReg->is_valid(), "destination must be valid");
7295 int idx = (int)toReg->value();
7296 if (reg_state[idx] == reg_readonly) {
7297 if (idx != from->value()) {
7298 mark_done = false;
7299 }
7300 done = false;
7301 continue;
7302 } else if (reg_state[idx] == reg_written) {
7303 continue;
7304 }
7305 assert(reg_state[idx] == reg_writable, "must be writable");
7306 reg_state[idx] = reg_written;
7307 progress = true;
7308
7309 if (fromReg == noreg) {
7310 if (from->is_reg()) {
7311 fromReg = from->as_Register();
7312 } else {
7313 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
7314 ldr(tmp1, Address(sp, st_off));
7315 fromReg = tmp1;
7316 }
7317 if (null_check) {
7318 // Nullable inline type argument, emit null check
7319 cbz(fromReg, L_null);
7320 }
7321 }
7322 int off = sig->at(stream.sig_index())._offset;
7323 if (off == -1) {
      assert(null_check, "missing null check");
7325 if (toReg->is_stack()) {
7326 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7327 mov(tmp2, 1);
7328 str(tmp2, Address(sp, st_off));
7329 } else {
7330 mov(toReg->as_Register(), 1);
7331 }
7332 continue;
7333 }
7334 assert(off > 0, "offset in object should be positive");
7335 Address fromAddr = Address(fromReg, off);
7336 if (!toReg->is_FloatRegister()) {
7337 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
7338 if (is_reference_type(bt)) {
7339 load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
7340 } else {
7341 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
7342 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
7343 }
7344 if (toReg->is_stack()) {
7345 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7346 str(dst, Address(sp, st_off));
7347 }
7348 } else if (bt == T_DOUBLE) {
7349 ldrd(toReg->as_FloatRegister(), fromAddr);
7350 } else {
7351 assert(bt == T_FLOAT, "must be float");
7352 ldrs(toReg->as_FloatRegister(), fromAddr);
7353 }
7354 }
7355 if (progress && null_check) {
7356 if (done) {
7357 b(L_notNull);
7358 bind(L_null);
7359 // Set null marker to zero to signal that the argument is null.
7360 // Also set all oop fields to zero to make the GC happy.
7361 stream.reset(sig_index, to_index);
7362 while (stream.next(toReg, bt)) {
7363 if (sig->at(stream.sig_index())._offset == -1 ||
7364 bt == T_OBJECT || bt == T_ARRAY) {
7365 if (toReg->is_stack()) {
7366 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7367 str(zr, Address(sp, st_off));
7368 } else {
7369 mov(toReg->as_Register(), zr);
7370 }
7371 }
7372 }
7373 bind(L_notNull);
7374 } else {
7375 bind(L_null);
7376 }
7377 }
7378
7379 // TODO 8366717 This is probably okay but looks fishy because stream is reset in the "Set null marker to zero" case just above. Same on x64.
7380 sig_index = stream.sig_index();
7381 to_index = stream.regs_index();
7382
7383 if (mark_done && reg_state[from->value()] != reg_written) {
7384 // This is okay because no one else will write to that slot
7385 reg_state[from->value()] = reg_writable;
7386 }
7387 from_index--;
7388 assert(progress || (start_offset == offset()), "should not emit code");
7389 return done;
7390 }
7391
7392 // Pack fields back into an inline type oop
7393 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
7394 VMRegPair* from, int from_count, int& from_index, VMReg to,
7395 RegState reg_state[], Register val_array) {
7396 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
7397 assert(to->is_valid(), "destination must be valid");
7398
7399 if (reg_state[to->value()] == reg_written) {
7400 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7401 return true; // Already written
7402 }
7403
7404 // The GC barrier expanded by store_heap_oop below may call into the
7405 // runtime so use callee-saved registers for any values that need to be
7406 // preserved. The GC barrier assembler should take care of saving the
7407 // Java argument registers.
  // TODO 8284443 Isn't it an issue if the code below uses r14 as tmp while it contains a spilled value?
7409 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
7410 Register val_obj_tmp = r21;
7411 Register from_reg_tmp = r22;
7412 Register tmp1 = r14;
7413 Register tmp2 = r13;
7414 Register tmp3 = r12;
7415 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
7416
7417 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
7418
7419 if (reg_state[to->value()] == reg_readonly) {
7420 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
7421 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7422 return false; // Not yet writable
7423 }
7424 val_obj = val_obj_tmp;
7425 }
7426
7427 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
7428 load_heap_oop(val_obj, Address(val_array, index), tmp1, tmp2);
7429
7430 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
7431 VMReg fromReg;
7432 BasicType bt;
7433 Label L_null;
7434 while (stream.next(fromReg, bt)) {
7435 assert(fromReg->is_valid(), "source must be valid");
7436 reg_state[fromReg->value()] = reg_writable;
7437
7438 int off = sig->at(stream.sig_index())._offset;
7439 if (off == -1) {
7440 // Nullable inline type argument, emit null check
7441 Label L_notNull;
7442 if (fromReg->is_stack()) {
7443 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7444 ldrb(tmp2, Address(sp, ld_off));
7445 cbnz(tmp2, L_notNull);
7446 } else {
7447 cbnz(fromReg->as_Register(), L_notNull);
7448 }
7449 mov(val_obj, 0);
7450 b(L_null);
7451 bind(L_notNull);
7452 continue;
7453 }
7454
7455 assert(off > 0, "offset in object should be positive");
7456 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
7457
7458 // Pack the scalarized field into the value object.
7459 Address dst(val_obj, off);
7460 if (!fromReg->is_FloatRegister()) {
7461 Register src;
7462 if (fromReg->is_stack()) {
7463 src = from_reg_tmp;
7464 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7465 load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
7466 } else {
7467 src = fromReg->as_Register();
7468 }
7469 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
7470 if (is_reference_type(bt)) {
7471 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
7472 } else {
7473 store_sized_value(dst, src, size_in_bytes);
7474 }
7475 } else if (bt == T_DOUBLE) {
7476 strd(fromReg->as_FloatRegister(), dst);
7477 } else {
7478 assert(bt == T_FLOAT, "must be float");
7479 strs(fromReg->as_FloatRegister(), dst);
7480 }
7481 }
7482 bind(L_null);
7483 sig_index = stream.sig_index();
7484 from_index = stream.regs_index();
7485
7486 assert(reg_state[to->value()] == reg_writable, "must have already been read");
7487 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
7488 assert(success, "to register must be writeable");
7489 return true;
7490 }
7491
7492 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
7493 return (reg->is_FloatRegister()) ? v8->as_VMReg() : r14->as_VMReg();
7494 }
7495
7496 void MacroAssembler::cache_wb(Address line) {
7497 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
7498 assert(line.index() == noreg, "index should be noreg");
7499 assert(line.offset() == 0, "offset should be 0");
7500 // would like to assert this
7501 // assert(line._ext.shift == 0, "shift should be zero");
7502 if (VM_Version::supports_dcpop()) {
7503 // writeback using clear virtual address to point of persistence
7504 dc(Assembler::CVAP, line.base());
7505 } else {
7506 // no need to generate anything as Unsafe.writebackMemory should
7507 // never invoke this stub
7508 }
7509 }
7510
7511 void MacroAssembler::cache_wbsync(bool is_pre) {
7512 // we only need a barrier post sync
7513 if (!is_pre) {
7514 membar(Assembler::AnyAny);
7515 }
7516 }
7517
7518 void MacroAssembler::verify_sve_vector_length(Register tmp) {
7519 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) {
7520 return;
7521 }
7522 // Make sure that native code does not change SVE vector length.
7523 Label verify_ok;
7524 movw(tmp, zr);
7525 sve_inc(tmp, B);
7526 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
7527 br(EQ, verify_ok);
7528 stop("Error: SVE vector length has changed since jvm startup");
7529 bind(verify_ok);
7530 }
7531
7532 void MacroAssembler::verify_ptrue() {
7533 Label verify_ok;
7534 if (!UseSVE) {
7535 return;
7536 }
7537 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
7538 sve_dec(rscratch1, B);
7539 cbz(rscratch1, verify_ok);
7540 stop("Error: the preserved predicate register (p7) elements are not all true");
7541 bind(verify_ok);
7542 }
7543
7544 void MacroAssembler::safepoint_isb() {
7545 isb();
7546 #ifndef PRODUCT
7547 if (VerifyCrossModifyFence) {
7548 // Clear the thread state.
7549 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
7550 }
7551 #endif
7552 }
7553
7554 #ifndef PRODUCT
7555 void MacroAssembler::verify_cross_modify_fence_not_required() {
7556 if (VerifyCrossModifyFence) {
7557 // Check if thread needs a cross modify fence.
7558 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
7559 Label fence_not_required;
7560 cbz(rscratch1, fence_not_required);
7561 // If it does then fail.
7562 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
7563 mov(c_rarg0, rthread);
7564 blr(rscratch1);
7565 bind(fence_not_required);
7566 }
7567 }
7568 #endif
7569
7570 void MacroAssembler::spin_wait() {
7571 block_comment("spin_wait {");
7572 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
7573 switch (VM_Version::spin_wait_desc().inst()) {
7574 case SpinWait::NOP:
7575 nop();
7576 break;
7577 case SpinWait::ISB:
7578 isb();
7579 break;
7580 case SpinWait::YIELD:
7581 yield();
7582 break;
7583 case SpinWait::SB:
7584 assert(VM_Version::supports_sb(), "current CPU does not support SB instruction");
7585 sb();
7586 break;
7587 default:
7588 ShouldNotReachHere();
7589 }
7590 }
7591 block_comment("}");
7592 }
7593
7594 // Stack frame creation/removal
7595
7596 void MacroAssembler::enter(bool strip_ret_addr) {
7597 if (strip_ret_addr) {
7598 // Addresses can only be signed once. If there are multiple nested frames being created
7599 // in the same function, then the return address needs stripping first.
7600 strip_return_address();
7601 }
7602 protect_return_address();
7603 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7604 mov(rfp, sp);
7605 }
7606
7607 void MacroAssembler::leave() {
7608 mov(sp, rfp);
7609 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
7610 authenticate_return_address();
7611 }
7612
7613 // ROP Protection
7614 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
7615 // destroying stack frames or whenever directly loading/storing the LR to memory.
7616 // If ROP protection is not set then these functions are no-ops.
7617 // For more details on PAC see pauth_aarch64.hpp.
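//
// Typical pairing, as used by enter() and leave() above (sketch):
//
//    protect_return_address();                      // pacia(z) lr before it is spilled
//    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
//    ...
//    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
//    authenticate_return_address();                 // autia(z) lr; a tampered LR faults on return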
7618
7619 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
7620 // Uses value zero as the modifier.
7621 //
7622 void MacroAssembler::protect_return_address() {
7623 if (VM_Version::use_rop_protection()) {
7624 check_return_address();
7625 paciaz();
7626 }
7627 }
7628
// Sign the return address held in the given register. Use before updating the LR in the existing
7630 // frame for the current function.
7631 // Uses value zero as the modifier.
7632 //
7633 void MacroAssembler::protect_return_address(Register return_reg) {
7634 if (VM_Version::use_rop_protection()) {
7635 check_return_address(return_reg);
7636 paciza(return_reg);
7637 }
7638 }
7639
7640 // Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
7641 // Uses value zero as the modifier.
7642 //
7643 void MacroAssembler::authenticate_return_address() {
7644 if (VM_Version::use_rop_protection()) {
7645 autiaz();
7646 check_return_address();
7647 }
7648 }
7649
// Authenticate the return address held in the given register. Use before updating the LR in the existing
7651 // stack frame for the current function.
7652 // Uses value zero as the modifier.
7653 //
7654 void MacroAssembler::authenticate_return_address(Register return_reg) {
7655 if (VM_Version::use_rop_protection()) {
7656 autiza(return_reg);
7657 check_return_address(return_reg);
7658 }
7659 }
7660
7661 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
7662 // there is no guaranteed way of authenticating the LR.
7663 //
7664 void MacroAssembler::strip_return_address() {
7665 if (VM_Version::use_rop_protection()) {
7666 xpaclri();
7667 }
7668 }
7669
7670 #ifndef PRODUCT
7671 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used, i.e. when the program returns to the invalid LR. At this point
7673 // it is difficult to debug back to the callee function.
7674 // This function simply loads from the address in the given register.
7675 // Use directly after authentication to catch authentication failures.
7676 // Also use before signing to check that the pointer is valid and hasn't already been signed.
7677 //
7678 void MacroAssembler::check_return_address(Register return_reg) {
7679 if (VM_Version::use_rop_protection()) {
7680 ldr(zr, Address(return_reg));
7681 }
7682 }
7683 #endif
7684
7685 // The java_calling_convention describes stack locations as ideal slots on
7686 // a frame with no abi restrictions. Since we must observe abi restrictions
7687 // (like the placement of the register window) the slots must be biased by
7688 // the following value.
7689 static int reg2offset_in(VMReg r) {
7690 // Account for saved rfp and lr
7691 // This should really be in_preserve_stack_slots
7692 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
7693 }
7694
7695 static int reg2offset_out(VMReg r) {
7696 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
7697 }
7698
// On 64-bit we will store integer-like items to the stack as
// 64-bit items (AArch64 ABI) even though Java would only store
// 32 bits for a parameter. On 32-bit it would simply be 32 bits,
// so this routine does 32->32 on 32-bit and 32->64 on 64-bit.
7703 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
7704 if (src.first()->is_stack()) {
7705 if (dst.first()->is_stack()) {
7706 // stack to stack
7707 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7708 str(tmp, Address(sp, reg2offset_out(dst.first())));
7709 } else {
7710 // stack to reg
7711 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7712 }
7713 } else if (dst.first()->is_stack()) {
7714 // reg to stack
7715 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7716 } else {
7717 if (dst.first() != src.first()) {
7718 sxtw(dst.first()->as_Register(), src.first()->as_Register());
7719 }
7720 }
7721 }
7722
// An oop arg. We must pass a handle, not the oop itself.
7724 void MacroAssembler::object_move(
7725 OopMap* map,
7726 int oop_handle_offset,
7727 int framesize_in_slots,
7728 VMRegPair src,
7729 VMRegPair dst,
7730 bool is_receiver,
7731 int* receiver_offset) {
7732
7733 // must pass a handle. First figure out the location we use as a handle
7734
7735 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
7736
  // See if the oop is null; if it is, we need no handle.
7738
7739 if (src.first()->is_stack()) {
7740
7741 // Oop is already on the stack as an argument
7742 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
7743 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
7744 if (is_receiver) {
7745 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
7746 }
7747
7748 ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
7749 lea(rHandle, Address(rfp, reg2offset_in(src.first())));
7750 // conditionally move a null
7751 cmp(rscratch1, zr);
7752 csel(rHandle, zr, rHandle, Assembler::EQ);
7753 } else {
7754
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop handles and pass a handle if the oop is non-null.
7757
7758 const Register rOop = src.first()->as_Register();
7759 int oop_slot;
7760 if (rOop == j_rarg0)
7761 oop_slot = 0;
7762 else if (rOop == j_rarg1)
7763 oop_slot = 1;
7764 else if (rOop == j_rarg2)
7765 oop_slot = 2;
7766 else if (rOop == j_rarg3)
7767 oop_slot = 3;
7768 else if (rOop == j_rarg4)
7769 oop_slot = 4;
7770 else if (rOop == j_rarg5)
7771 oop_slot = 5;
7772 else if (rOop == j_rarg6)
7773 oop_slot = 6;
7774 else {
7775 assert(rOop == j_rarg7, "wrong register");
7776 oop_slot = 7;
7777 }
7778
7779 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
7780 int offset = oop_slot*VMRegImpl::stack_slot_size;
7781
7782 map->set_oop(VMRegImpl::stack2reg(oop_slot));
7783 // Store oop in handle area, may be null
7784 str(rOop, Address(sp, offset));
7785 if (is_receiver) {
7786 *receiver_offset = offset;
7787 }
7788
7789 cmp(rOop, zr);
7790 lea(rHandle, Address(sp, offset));
7791 // conditionally move a null
7792 csel(rHandle, zr, rHandle, Assembler::EQ);
7793 }
7794
7795 // If arg is on the stack then place it otherwise it is already in correct reg.
7796 if (dst.first()->is_stack()) {
7797 str(rHandle, Address(sp, reg2offset_out(dst.first())));
7798 }
7799 }
7800
// A float arg. May have to be moved between registers and stack slots.
7802 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
7803 if (src.first()->is_stack()) {
7804 if (dst.first()->is_stack()) {
7805 ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
7806 strw(tmp, Address(sp, reg2offset_out(dst.first())));
7807 } else {
7808 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7809 }
7810 } else if (src.first() != dst.first()) {
7811 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7812 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7813 else
7814 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7815 }
7816 }
7817
7818 // A long move
7819 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
7820 if (src.first()->is_stack()) {
7821 if (dst.first()->is_stack()) {
7822 // stack to stack
7823 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7824 str(tmp, Address(sp, reg2offset_out(dst.first())));
7825 } else {
7826 // stack to reg
7827 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7828 }
7829 } else if (dst.first()->is_stack()) {
7830 // reg to stack
7831 // Do we really have to sign extend???
7832 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
7833 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7834 } else {
7835 if (dst.first() != src.first()) {
7836 mov(dst.first()->as_Register(), src.first()->as_Register());
7837 }
7838 }
7839 }
7840
7841
7842 // A double move
7843 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
7844 if (src.first()->is_stack()) {
7845 if (dst.first()->is_stack()) {
7846 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7847 str(tmp, Address(sp, reg2offset_out(dst.first())));
7848 } else {
7849 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7850 }
7851 } else if (src.first() != dst.first()) {
7852 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7853 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7854 else
7855 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7856 }
7857 }
7858
7859 // Implements lightweight-locking.
7860 //
7861 // - obj: the object to be locked
7862 // - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
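//
// Fast-path outline (illustrative pseudocode, not the exact emitted sequence):
//
//    mark = obj->mark();
//    if (lock-stack is full)                 goto slow;
//    if (lock-stack top == obj)              goto push;   // nested (recursive) lock
//    if (mark has the monitor bits (0b10))   goto slow;
//    if (!CAS(&obj->mark, mark | 0b01 /* unlocked */, mark & ~0b01 /* fast-locked */))
//                                            goto slow;
//  push:
//    lock-stack.push(obj);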
7864 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
7865 assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
7866
7867 Label push;
7868 const Register top = t1;
7869 const Register mark = t2;
7870 const Register t = t3;
7871
7872 // Preload the markWord. It is important that this is the first
7873 // instruction emitted as it is part of C1's null check semantics.
7874 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7875
7876 if (UseObjectMonitorTable) {
7877 // Clear cache in case fast locking succeeds or we need to take the slow-path.
7878 str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
7879 }
7880
7881 if (DiagnoseSyncOnValueBasedClasses != 0) {
7882 load_klass(t1, obj);
7883 ldrb(t1, Address(t1, Klass::misc_flags_offset()));
7884 tst(t1, KlassFlags::_misc_is_value_based_class);
7885 br(Assembler::NE, slow);
7886 }
7887
7888 // Check if the lock-stack is full.
7889 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7890 cmpw(top, (unsigned)LockStack::end_offset());
7891 br(Assembler::GE, slow);
7892
7893 // Check for recursion.
7894 subw(t, top, oopSize);
7895 ldr(t, Address(rthread, t));
7896 cmp(obj, t);
7897 br(Assembler::EQ, push);
7898
7899 // Check header for monitor (0b10).
7900 tst(mark, markWord::monitor_value);
7901 br(Assembler::NE, slow);
7902
7903 // Try to lock. Transition lock bits 0b01 => 0b00
7904 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7905 orr(mark, mark, markWord::unlocked_value);
7906 if (EnableValhalla) {
7907 // Mask inline_type bit such that we go to the slow path if object is an inline type
7908 andr(mark, mark, ~((int) markWord::inline_type_bit_in_place));
7909 }
7910 eor(t, mark, markWord::unlocked_value);
7911 cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
7912 /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
7913 br(Assembler::NE, slow);
7914
7915 bind(push);
7916 // After successful lock, push object on lock-stack.
7917 str(obj, Address(rthread, top));
7918 addw(top, top, oopSize);
7919 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7920 }
7921
7922 // Implements lightweight-unlocking.
7923 //
7924 // - obj: the object to be unlocked
7925 // - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
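//
// Fast-path outline (illustrative pseudocode, not the exact emitted sequence):
//
//    if (lock-stack top != obj)              goto slow;
//    lock-stack.pop();
//    if (new lock-stack top == obj)          return;      // nested (recursive) unlock
//    mark = obj->mark();
//    if (mark has the monitor bits (0b10))   { lock-stack.push(obj); goto slow; }
//    if (CAS(&obj->mark, mark, mark | 0b01)) return;      // 0b00 -> 0b01
//    lock-stack.push(obj); goto slow;                     // contention; let the runtime handle it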
7927 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
7928 // cmpxchg clobbers rscratch1.
7929 assert_different_registers(obj, t1, t2, t3, rscratch1);
7930
7931 #ifdef ASSERT
7932 {
7933 // Check for lock-stack underflow.
7934 Label stack_ok;
7935 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7936 cmpw(t1, (unsigned)LockStack::start_offset());
7937 br(Assembler::GE, stack_ok);
7938 STOP("Lock-stack underflow");
7939 bind(stack_ok);
7940 }
7941 #endif
7942
7943 Label unlocked, push_and_slow;
7944 const Register top = t1;
7945 const Register mark = t2;
7946 const Register t = t3;
7947
7948 // Check if obj is top of lock-stack.
7949 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7950 subw(top, top, oopSize);
7951 ldr(t, Address(rthread, top));
7952 cmp(obj, t);
7953 br(Assembler::NE, slow);
7954
7955 // Pop lock-stack.
7956 DEBUG_ONLY(str(zr, Address(rthread, top));)
7957 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7958
7959 // Check if recursive.
7960 subw(t, top, oopSize);
7961 ldr(t, Address(rthread, t));
7962 cmp(obj, t);
7963 br(Assembler::EQ, unlocked);
7964
7965 // Not recursive. Check header for monitor (0b10).
7966 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7967 tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
7968
7969 #ifdef ASSERT
7970 // Check header not unlocked (0b01).
7971 Label not_unlocked;
7972 tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
7973 stop("lightweight_unlock already unlocked");
7974 bind(not_unlocked);
7975 #endif
7976
7977 // Try to unlock. Transition lock bits 0b00 => 0b01
7978 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7979 orr(t, mark, markWord::unlocked_value);
7980 cmpxchg(obj, mark, t, Assembler::xword,
7981 /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
7982 br(Assembler::EQ, unlocked);
7983
7984 bind(push_and_slow);
7985 // Restore lock-stack and handle the unlock in runtime.
7986 DEBUG_ONLY(str(obj, Address(rthread, top));)
7987 addw(top, top, oopSize);
7988 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7989 b(slow);
7990
7991 bind(unlocked);
7992 }