/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInlineKlass.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/globals.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/integerCast.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm19
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                 strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
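// As a worked example (illustrative only): an unconditional branch such as
// "b ." encodes as 0b000101 followed by imm26, so insn[30:25] is 0b00101x
// and the dispatch below lands in the 0b001010/0b001011 case pair, which
// reads or rewrites the 26-bit word offset.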

static uint32_t insn_at(address insn_addr, int n) {
  return ((uint32_t*)insn_addr)[n];
}

template<typename T>
class RelocActions : public AllStatic {

public:

  static int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;
    uint32_t insn = insn_at(insn_addr, 0);

    uint32_t dispatch = Instruction_aarch64::extract(insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = T::unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = T::conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = T::testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = T::loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(insn_addr, 1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = T::adrp(insn_addr, target, T::adrpMem);
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = T::adrp(insn_addr, target, T::adrpAdd);
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = T::adrp(insn_addr, target, T::adrpMovk);
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = T::adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = T::immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    T::verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public AllStatic {
public:
  static int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  static int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  static int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  static int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  static int adr(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  template<typename U>
  static int adrp(address insn_addr, address &target, U inner) {
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    int instructions = inner(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    ptrdiff_t offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, dest >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  static int immediate(address insn_addr, address &target) {
    // Move wide constant
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uint64_t dest = (uint64_t)target;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}
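
// For example (an illustrative sketch): given the pair
//   adrp x3, <page>        // insn1, Rd == x3
//   ldr  x4, [x3, #0x48]   // insn2, Rn == x3; 64-bit access, so imm12 is scaled by 8
// offset_for() takes the first arm and returns byte_offset == 0x48. For
// "add x4, x3, #0x48" it takes the second arm, where no scaling applies.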

class AArch64Decoder : public AllStatic {
public:

  static int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  static int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  static int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  static int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 18, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  static int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    uint32_t insn = insn_at(insn_addr, 0);
    intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    target = insn_addr + offset;
    return 1;
  }
  template<typename U>
  static int adrp(address insn_addr, address &target, U inner) {
    uint32_t insn = insn_at(insn_addr, 0);
    assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    target = address(target_page);
    precond(inner != nullptr);
    inner(insn_addr, target);
    return 2;
  }
  static int adrpMem(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  static int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    return 3;
  }
  static void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
  return RelocActions<Patcher>::run(insn_addr, target);
}
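
// Typical usage (a sketch): after patching, the instruction cache must be
// invalidated before the rewritten code can safely execute, e.g.
//   int len = MacroAssembler::pd_patch_instruction_size(insn_addr, target);
//   ICache::invalidate_range(insn_addr, len);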

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
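  //
  // The two shapes handled below are therefore (an illustrative sketch):
  //   narrow: movz rX, #(n >> 16), lsl #16 ; movk rX, #(n & 0xffff)
  //   wide:   movz rX, #(d & 0xffff) ; movk rX, #(d >> 16), lsl #16 ; movk rX, #(d >> 32), lsl #32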
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
  ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & esp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
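//
// A typical sequence around a runtime call therefore looks like (a sketch):
//   set_last_Java_frame(last_java_sp, rfp, return_pc, rscratch1); // record anchor
//   ... call into C land ...
//   reset_last_Java_frame(true);                                  // clear anchor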
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // We must set sp last.
  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (AOTCodeCache::is_on_for_dump()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2GB (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far jump not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2GB (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  Label*   return_pc,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if (!TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, return_pc != nullptr ? *return_pc : l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with a PAC signature during throw_pending_exception
  // if the call was tail-call optimized by the compiler; since lr is not
  // callee-saved, reload it with the proper value.
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, nullptr, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (AOTCodeCache::is_on_for_dump()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call target from the constant pool
  // - branch to it
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  if (codestub_branch_needs_far_jump()) {
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    b(pc());
  }
}

int MacroAssembler::static_call_stub_size() {
  // During an AOT production run, AOT and JIT compiled code
  // are used at the same time. We need this size
  // to be the same for both types of code.
  if (!codestub_branch_needs_far_jump() && !AOTCodeCache::is_on_for_use()) {
    // isb; movz; movk; movk; b
    return 5 * NativeInstruction::instruction_size;
  }
  // isb; movz; movk; movk; movz; movk; movk; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at the least-significant byte of x,
  //       since C-style booleans are stored in one byte only
  //       (this was once a bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * (7 + extra_instructions);
  } else {
    return NativeInstruction::instruction_size * (5 + extra_instructions);
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, receiver);
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, nullptr, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result_oop(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_oop_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_oop_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_metadata(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_metadata_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_metadata_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}
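
// For example (a sketch): ic_check() in this file uses the two-argument form as
//   align(end_alignment, offset() + ic_check_size());
// so that the *end* of the inline cache check (the verified entry point) lands
// on the alignment boundary, rather than the current emission position.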

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}
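
// Note on the pattern above (an explanatory aside): writes to zr are
// architecturally discarded, so "movk zr, #0" executes as a no-op. The two
// movk immediates thus act as patchable 16-bit data fields in the code
// stream, which the runtime can later fill in (see NativePostCallNop)
// without altering execution.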

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off) {
      add(recv_klass, recv_klass, itentry_off);
    }
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
      - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   Register super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
  bool must_load_sco = !super_check_offset->is_valid();
  if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = temp_reg;
  }

  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype
  br(Assembler::EQ, *L_success);

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
  if (L_failure == &L_fallthrough) {
    cbz(rscratch1, *L_slow_path);
  } else {
    cbnz(rscratch1, *L_failure);
    final_jmp(*L_slow_path);
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
1473
1474 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
1475 Register super_klass,
1476 Register temp_reg,
1477 Register temp2_reg,
1478 Label* L_success,
1479 Label* L_failure,
1480 bool set_cond_codes) {
1481 // NB! Callers may assume that, when temp2_reg is a valid register,
1482 // this code sets it to a nonzero value.
1483
1484 assert_different_registers(sub_klass, super_klass, temp_reg);
1485 if (temp2_reg != noreg)
1486 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1487 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1488
1489 Label L_fallthrough;
1490 int label_nulls = 0;
1491 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1492 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1493 assert(label_nulls <= 1, "at most one null in the batch");
1494
1495 // a couple of useful fields in sub_klass:
1496 int ss_offset = in_bytes(Klass::secondary_supers_offset());
1497 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1498 Address secondary_supers_addr(sub_klass, ss_offset);
1499 Address super_cache_addr( sub_klass, sc_offset);
1500
1501 BLOCK_COMMENT("check_klass_subtype_slow_path");
1502
1503 // Do a linear scan of the secondary super-klass chain.
1504 // This code is rarely used, so simplicity is a virtue here.
1505 // The repne_scan instruction uses fixed registers, which we must spill.
1506 // Don't worry too much about pre-existing connections with the input regs.
1507
1508 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1509 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1510
1511 RegSet pushed_registers;
1512 if (!IS_A_TEMP(r2)) pushed_registers += r2;
1513 if (!IS_A_TEMP(r5)) pushed_registers += r5;
1514
1515 if (super_klass != r0) {
1516 if (!IS_A_TEMP(r0)) pushed_registers += r0;
1517 }
1518
1519 push(pushed_registers, sp);
1520
1521 // Get super_klass value into r0 (even if it was in r5 or r2).
1522 if (super_klass != r0) {
1523 mov(r0, super_klass);
1524 }
1525
1526 #ifndef PRODUCT
1527 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1528 #endif //PRODUCT
1529
1530 // We will consult the secondary-super array.
1531 ldr(r5, secondary_supers_addr);
1532 // Load the array length.
1533 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1534 // Skip to start of data.
1535 add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1536
1537 cmp(sp, zr); // Clear Z flag; SP is never zero
1538 // Scan R2 words at [R5] for an occurrence of R0.
1539 // Set NZ/Z based on last compare.
1540 repne_scan(r5, r0, r2, rscratch1);
1541
1542 // Unspill the temp. registers:
1543 pop(pushed_registers, sp);
1544
1545 br(Assembler::NE, *L_failure);
1546
1547 // Success. Cache the super we found and proceed in triumph.
1548
1549 if (UseSecondarySupersCache) {
1550 str(super_klass, super_cache_addr);
1551 }
1552
1553 if (L_success != &L_fallthrough) {
1554 b(*L_success);
1555 }
1556
1557 #undef IS_A_TEMP
1558
1559 bind(L_fallthrough);
1560 }
1561
// If Register r is invalid, take the next register from
// available_regs and add it to regs_to_push.
1564 Register MacroAssembler::allocate_if_noreg(Register r,
1565 RegSetIterator<Register> &available_regs,
                                           RegSet &regs_to_push) {
1567 if (!r->is_valid()) {
1568 r = *available_regs++;
1569 regs_to_push += r;
1570 }
1571 return r;
1572 }
1573
// check_klass_subtype_slow_path_table() looks for super_klass in the
// hash table belonging to sub_klass, branching to L_success or
// L_failure as appropriate. This is essentially a shim which
// allocates registers as necessary then calls
// lookup_secondary_supers_table() to do the work. Any of the temp
// regs may be noreg, in which case this logic chooses some free
// registers and pushes and pops them around the lookup.
1581 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1582 Register super_klass,
1583 Register temp_reg,
1584 Register temp2_reg,
1585 Register temp3_reg,
1586 Register result_reg,
1587 FloatRegister vtemp,
1588 Label* L_success,
1589 Label* L_failure,
1590 bool set_cond_codes) {
1591 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1592
1593 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1594
1595 Label L_fallthrough;
1596 int label_nulls = 0;
1597 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1598 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1599 assert(label_nulls <= 1, "at most one null in the batch");
1600
1601 BLOCK_COMMENT("check_klass_subtype_slow_path");
1602
1603 RegSetIterator<Register> available_regs
1604 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1605
1606 RegSet pushed_regs;
1607
1608 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1609 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1610 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1611 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1612
1613 push(pushed_regs, sp);
1614
1615 lookup_secondary_supers_table_var(sub_klass,
1616 super_klass,
1617 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1618 nullptr);
1619 cmp(result_reg, zr);
1620
1621 // Unspill the temp. registers:
1622 pop(pushed_regs, sp);
1623
1624 // NB! Callers may assume that, when set_cond_codes is true, this
1625 // code sets temp2_reg to a nonzero value.
1626 if (set_cond_codes) {
1627 mov(temp2_reg, 1);
1628 }
1629
1630 br(Assembler::NE, *L_failure);
1631
1632 if (L_success != &L_fallthrough) {
1633 b(*L_success);
1634 }
1635
1636 bind(L_fallthrough);
1637 }
1638
1639 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1640 Register super_klass,
1641 Register temp_reg,
1642 Register temp2_reg,
1643 Label* L_success,
1644 Label* L_failure,
1645 bool set_cond_codes) {
1646 if (UseSecondarySupersTable) {
1647 check_klass_subtype_slow_path_table
1648 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1649 /*vtemp*/fnoreg,
1650 L_success, L_failure, set_cond_codes);
1651 } else {
1652 check_klass_subtype_slow_path_linear
1653 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1654 }
1655 }
1656
1657
1658 // Ensure that the inline code and the stub are using the same registers.
1659 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
1660 do { \
1661 assert(r_super_klass == r0 && \
1662 r_array_base == r1 && \
1663 r_array_length == r2 && \
1664 (r_array_index == r3 || r_array_index == noreg) && \
1665 (r_sub_klass == r4 || r_sub_klass == noreg) && \
1666 (r_bitmap == rscratch2 || r_bitmap == noreg) && \
1667 (result == r5 || result == noreg), "registers must match aarch64.ad"); \
1668 } while(0)
1669
1670 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
1671 Register r_super_klass,
1672 Register temp1,
1673 Register temp2,
1674 Register temp3,
1675 FloatRegister vtemp,
1676 Register result,
1677 u1 super_klass_slot,
1678 bool stub_is_near) {
1679 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1680
1681 Label L_fallthrough;
1682
1683 BLOCK_COMMENT("lookup_secondary_supers_table {");
1684
1685 const Register
1686 r_array_base = temp1, // r1
1687 r_array_length = temp2, // r2
1688 r_array_index = temp3, // r3
1689 r_bitmap = rscratch2;
1690
1691 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1692
1693 u1 bit = super_klass_slot;
1694
1695 // Make sure that result is nonzero if the TBZ below misses.
1696 mov(result, 1);
1697
1698 // We're going to need the bitmap in a vector reg and in a core reg,
1699 // so load both now.
1700 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1701 if (bit != 0) {
1702 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1703 }
1704 // First check the bitmap to see if super_klass might be present. If
1705 // the bit is zero, we are certain that super_klass is not one of
1706 // the secondary supers.
1707 tbz(r_bitmap, bit, L_fallthrough);
1708
1709 // Get the first array index that can contain super_klass into r_array_index.
1710 if (bit != 0) {
1711 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit);
1712 cnt(vtemp, T8B, vtemp);
1713 addv(vtemp, T8B, vtemp);
1714 fmovd(r_array_index, vtemp);
1715 } else {
1716 mov(r_array_index, (u1)1);
1717 }
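  // What the vector sequence above computes (illustrative):
  //
  //   r_array_index = popcount(bitmap & ((1 << (bit + 1)) - 1))
  //
  // i.e. the number of bitmap bits set at or below 'bit'. Since bit
  // 'bit' is known to be set (the TBZ above did not branch), this is
  // 1 + the rank of super_klass's slot among the occupied slots.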
1718 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1719
1720 // We will consult the secondary-super array.
1721 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1722
1723 // The value i in r_array_index is >= 1, so even though r_array_base
1724 // points to the length, we don't need to adjust it to point to the
1725 // data.
1726 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1727 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1728
1729 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1730 eor(result, result, r_super_klass);
1731 cbz(result, L_fallthrough); // Found a match
1732
1733 // Is there another entry to check? Consult the bitmap.
1734 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough);
1735
1736 // Linear probe.
1737 if (bit != 0) {
1738 ror(r_bitmap, r_bitmap, bit);
1739 }
1740
1741 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1742 // The next slot to be inspected, by the stub we're about to call,
1743 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1744 // have been checked.
1745 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1746 if (stub_is_near) {
1747 bl(stub);
1748 } else {
1749 address call = trampoline_call(stub);
1750 if (call == nullptr) {
1751 return false; // trampoline allocation failed
1752 }
1753 }
1754
1755 BLOCK_COMMENT("} lookup_secondary_supers_table");
1756
1757 bind(L_fallthrough);
1758
1759 if (VerifySecondarySupers) {
1760 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1761 temp1, temp2, result); // r1, r2, r5
1762 }
1763 return true;
1764 }
1765
1766 // At runtime, return 0 in result if r_super_klass is a superclass of
1767 // r_sub_klass, otherwise return nonzero. Use this version of
1768 // lookup_secondary_supers_table() if you don't know ahead of time
1769 // which superclass will be searched for. Used by interpreter and
1770 // runtime stubs. It is larger and has somewhat greater latency than
1771 // the version above, which takes a constant super_klass_slot.
1772 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
1773 Register r_super_klass,
1774 Register temp1,
1775 Register temp2,
1776 Register temp3,
1777 FloatRegister vtemp,
1778 Register result,
1779 Label *L_success) {
1780 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1781
1782 Label L_fallthrough;
1783
1784 BLOCK_COMMENT("lookup_secondary_supers_table {");
1785
1786 const Register
1787 r_array_index = temp3,
1788 slot = rscratch1,
1789 r_bitmap = rscratch2;
1790
1791 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
1792
1793 // Make sure that result is nonzero if the test below misses.
1794 mov(result, 1);
1795
1796 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1797
1798 // First check the bitmap to see if super_klass might be present. If
1799 // the bit is zero, we are certain that super_klass is not one of
1800 // the secondary supers.
1801
1802 // This next instruction is equivalent to:
1803 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1804 // sub(temp2, tmp_reg, slot);
1805 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1806 lslv(temp2, r_bitmap, temp2);
1807 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough);
1808
1809 bool must_save_v0 = (vtemp == fnoreg);
1810 if (must_save_v0) {
1811 // temp1 and result are free, so use them to preserve vtemp
1812 vtemp = v0;
1813 mov(temp1, vtemp, D, 0);
1814 mov(result, vtemp, D, 1);
1815 }
1816
1817 // Get the first array index that can contain super_klass into r_array_index.
1818 mov(vtemp, D, 0, temp2);
1819 cnt(vtemp, T8B, vtemp);
1820 addv(vtemp, T8B, vtemp);
1821 mov(r_array_index, vtemp, D, 0);
1822
1823 if (must_save_v0) {
    mov(vtemp, D, 0, temp1);
    mov(vtemp, D, 1, result);
1826 }
1827
1828 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1829
1830 const Register
1831 r_array_base = temp1,
1832 r_array_length = temp2;
1833
1834 // The value i in r_array_index is >= 1, so even though r_array_base
1835 // points to the length, we don't need to adjust it to point to the
1836 // data.
1837 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1838 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1839
1840 // We will consult the secondary-super array.
1841 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1842
1843 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1844 eor(result, result, r_super_klass);
1845 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match
1846
1847 // Is there another entry to check? Consult the bitmap.
1848 rorv(r_bitmap, r_bitmap, slot);
1849 // rol(r_bitmap, r_bitmap, 1);
1850 tbz(r_bitmap, 1, L_fallthrough);
1851
1852 // The slot we just inspected is at secondary_supers[r_array_index - 1].
1853 // The next slot to be inspected, by the logic we're about to call,
1854 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1855 // have been checked.
1856 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index,
1857 r_bitmap, r_array_length, result, /*is_stub*/false);
1858
1859 BLOCK_COMMENT("} lookup_secondary_supers_table");
1860
1861 bind(L_fallthrough);
1862
1863 if (VerifySecondarySupers) {
1864 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1865 temp1, temp2, result); // r1, r2, r5
1866 }
1867
1868 if (L_success) {
1869 cbz(result, *L_success);
1870 }
1871 }
1872
1873 // Called by code generated by check_klass_subtype_slow_path
1874 // above. This is called when there is a collision in the hashed
1875 // lookup in the secondary supers array.
1876 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1877 Register r_array_base,
1878 Register r_array_index,
1879 Register r_bitmap,
1880 Register temp1,
1881 Register result,
1882 bool is_stub) {
1883 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1884
1885 const Register
1886 r_array_length = temp1,
1887 r_sub_klass = noreg; // unused
1888
1889 if (is_stub) {
1890 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1891 }
1892
1893 Label L_fallthrough, L_huge;
1894
1895 // Load the array length.
1896 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1897 // And adjust the array base to point to the data.
1898 // NB! Effectively increments current slot index by 1.
1899 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1900 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1901
1902 // The bitmap is full to bursting.
1903 // Implicit invariant: BITMAP_FULL implies (length > 0)
1904 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1905 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2));
1906 br(GT, L_huge);
1907
1908 // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1909 // current slot (at secondary_supers[r_array_index]) has not yet
1910 // been inspected, and r_array_index may be out of bounds if we
1911 // wrapped around the end of the array.
1912
1913 { // This is conventional linear probing, but instead of terminating
1914 // when a null entry is found in the table, we maintain a bitmap
1915 // in which a 0 indicates missing entries.
1916 // As long as the bitmap is not completely full,
1917 // array_length == popcount(bitmap). The array_length check above
1918 // guarantees there are 0s in the bitmap, so the loop eventually
1919 // terminates.
1920 Label L_loop;
1921 bind(L_loop);
1922
1923 // Check for wraparound.
1924 cmp(r_array_index, r_array_length);
1925 csel(r_array_index, zr, r_array_index, GE);
1926
1927 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1928 eor(result, rscratch1, r_super_klass);
1929 cbz(result, L_fallthrough);
1930
1931 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1932
1933 ror(r_bitmap, r_bitmap, 1);
1934 add(r_array_index, r_array_index, 1);
1935 b(L_loop);
1936 }
1937
1938 { // Degenerate case: more than 64 secondary supers.
1939 // FIXME: We could do something smarter here, maybe a vectorized
1940 // comparison or a binary search, but is that worth any added
1941 // complexity?
1942 bind(L_huge);
1943 cmp(sp, zr); // Clear Z flag; SP is never zero
1944 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1945 cset(result, NE); // result == 0 iff we got a match.
1946 }
1947
1948 bind(L_fallthrough);
1949 }
1950
1951 // Make sure that the hashed lookup and a linear scan agree.
1952 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1953 Register r_super_klass,
1954 Register temp1,
1955 Register temp2,
1956 Register result) {
1957 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1958
1959 const Register
1960 r_array_base = temp1,
1961 r_array_length = temp2;
1962
1963 BLOCK_COMMENT("verify_secondary_supers_table {");
1964
1965 // We will consult the secondary-super array.
1966 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1967
1968 // Load the array length.
1969 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1970 // And adjust the array base to point to the data.
1971 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1972
1973 cmp(sp, zr); // Clear Z flag; SP is never zero
1974 // Scan R2 words at [R5] for an occurrence of R0.
1975 // Set NZ/Z based on last compare.
1976 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
1977 // rscratch1 == 0 iff we got a match.
1978 cset(rscratch1, NE);
1979
1980 Label passed;
1981 cmp(result, zr);
1982 cset(result, NE); // normalize result to 0/1 for comparison
1983
1984 cmp(rscratch1, result);
1985 br(EQ, passed);
1986 {
1987 mov(r0, r_super_klass); // r0 <- r0
1988 mov(r1, r_sub_klass); // r1 <- r4
1989 mov(r2, /*expected*/rscratch1); // r2 <- r8
1990 mov(r3, result); // r3 <- r5
1991 mov(r4, (address)("mismatch")); // r4 <- const
1992 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
1993 should_not_reach_here();
1994 }
1995 bind(passed);
1996
1997 BLOCK_COMMENT("} verify_secondary_supers_table");
1998 }
1999
2000 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
2001 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
2002 assert_different_registers(klass, rthread, scratch);
2003
2004 Label L_fallthrough, L_tmp;
2005 if (L_fast_path == nullptr) {
2006 L_fast_path = &L_fallthrough;
2007 } else if (L_slow_path == nullptr) {
2008 L_slow_path = &L_fallthrough;
2009 }
2010 // Fast path check: class is fully initialized
2011 lea(scratch, Address(klass, InstanceKlass::init_state_offset()));
2012 ldarb(scratch, scratch);
2013 cmp(scratch, InstanceKlass::fully_initialized);
2014 br(Assembler::EQ, *L_fast_path);
2015
2016 // Fast path check: current thread is initializer thread
2017 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
2018 cmp(rthread, scratch);
2019
2020 if (L_slow_path == &L_fallthrough) {
2021 br(Assembler::EQ, *L_fast_path);
2022 bind(*L_slow_path);
2023 } else if (L_fast_path == &L_fallthrough) {
2024 br(Assembler::NE, *L_slow_path);
2025 bind(*L_fast_path);
2026 } else {
2027 Unimplemented();
2028 }
2029 }
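
// Illustrative pseudocode for the barrier above (not emitted code):
//
//   if (klass->_init_state == fully_initialized) goto fast;  // acquire load
//   if (klass->_init_thread == current_thread)   goto fast;
//   goto slow;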
2030
2031 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
2032 if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (below) confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
2035 return;
2036 }
2037
2038 // Pass register number to verify_oop_subroutine
2039 const char* b = nullptr;
2040 {
2041 ResourceMark rm;
2042 stringStream ss;
2043 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
2044 b = code_string(ss.as_string());
2045 }
2046 BLOCK_COMMENT("verify_oop {");
2047
2048 strip_return_address(); // This might happen within a stack frame.
2049 protect_return_address();
2050 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2051 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2052
2053 mov(r0, reg);
2054 movptr(rscratch1, (uintptr_t)(address)b);
2055
2056 // call indirectly to solve generation ordering problem
2057 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2058 ldr(rscratch2, Address(rscratch2));
2059 blr(rscratch2);
2060
2061 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2062 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2063 authenticate_return_address();
2064
2065 BLOCK_COMMENT("} verify_oop");
2066 }
2067
2068 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
2069 if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (below) confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
2072 return;
2073 }
2074
2075 const char* b = nullptr;
2076 {
2077 ResourceMark rm;
2078 stringStream ss;
2079 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
2080 b = code_string(ss.as_string());
2081 }
2082 BLOCK_COMMENT("verify_oop_addr {");
2083
2084 strip_return_address(); // This might happen within a stack frame.
2085 protect_return_address();
2086 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2087 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2088
2089 // addr may contain sp so we will have to adjust it based on the
2090 // pushes that we just did.
2091 if (addr.uses(sp)) {
2092 lea(r0, addr);
2093 ldr(r0, Address(r0, 4 * wordSize));
2094 } else {
2095 ldr(r0, addr);
2096 }
2097 movptr(rscratch1, (uintptr_t)(address)b);
2098
2099 // call indirectly to solve generation ordering problem
2100 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2101 ldr(rscratch2, Address(rscratch2));
2102 blr(rscratch2);
2103
2104 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2105 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2106 authenticate_return_address();
2107
2108 BLOCK_COMMENT("} verify_oop_addr");
2109 }
2110
2111 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
2112 int extra_slot_offset) {
2113 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
2114 int stackElementSize = Interpreter::stackElementSize;
2115 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
2116 #ifdef ASSERT
2117 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
2118 assert(offset1 - offset == stackElementSize, "correct arithmetic");
2119 #endif
2120 if (arg_slot.is_constant()) {
2121 return Address(esp, arg_slot.as_constant() * stackElementSize
2122 + offset);
2123 } else {
2124 add(rscratch1, esp, arg_slot.as_register(),
2125 ext::uxtx, exact_log2(stackElementSize));
2126 return Address(rscratch1, offset);
2127 }
2128 }
2129
2130 // Handle the receiver type profile update given the "recv" klass.
2131 //
// Normally updates the ReceiverTypeData (RD) that starts at "mdp" + "mdp_offset".
// If there are no matching or claimable receiver entries in RD, updates
// the polymorphic counter.
//
// This code is expected to be run by either the interpreter or JIT-ed code,
// without extra synchronization. For safety, receiver cells are claimed
// atomically, which avoids grossly misrepresenting the profiles under
// concurrent updates. For speed, counter updates are not atomic.
2140 //
2141 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
2142 assert_different_registers(recv, mdp, rscratch1, rscratch2);
2143
2144 int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
2145 int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
2146 int poly_count_offset = in_bytes(CounterData::count_offset());
2147 int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
2148 int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
2149
2150 // Adjust for MDP offsets.
2151 base_receiver_offset += mdp_offset;
2152 end_receiver_offset += mdp_offset;
2153 poly_count_offset += mdp_offset;
2154
2155 #ifdef ASSERT
2156 // We are about to walk the MDO slots without asking for offsets.
2157 // Check that our math hits all the right spots.
2158 for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
2159 int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
2160 int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
2161 int offset = base_receiver_offset + receiver_step*c;
2162 int count_offset = offset + receiver_to_count_step;
2163 assert(offset == real_recv_offset, "receiver slot math");
2164 assert(count_offset == real_count_offset, "receiver count math");
2165 }
2166 int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
2167 assert(poly_count_offset == real_poly_count_offset, "poly counter math");
2168 #endif
2169
2170 // Corner case: no profile table. Increment poly counter and exit.
2171 if (ReceiverTypeData::row_limit() == 0) {
2172 increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
2173 return;
2174 }
2175
2176 Register offset = rscratch2;
2177
2178 Label L_loop_search_receiver, L_loop_search_empty;
2179 Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
2180
2181 // The code here recognizes three major cases:
2182 // A. Fastest: receiver found in the table
2183 // B. Fast: no receiver in the table, and the table is full
2184 // C. Slow: no receiver in the table, free slots in the table
2185 //
  // The case A performance is most important, as well-behaved code would end up
  // there, especially with larger TypeProfileWidth. The case B performance is
  // important as well; this is where the bulk of code lands for normally
  // megamorphic cases. The case C performance is not essential; its job is to
  // deal with installation races, so we optimize for code density instead.
  // Case C needs to make sure that receiver rows are only claimed once. This
  // guarantees we never overwrite a row for another receiver and never
  // duplicate receivers in the list, keeping the profile type-accurate.
  //
  // It is very tempting to handle these cases in a single loop, and claim the
  // first free slot without checking the rest of the table. But profiling code
  // should tolerate free slots in the table, as class unloading can clear them.
  // After such a cleanup, the receiver we need might be _after_ a free slot.
  // Therefore, we need to let at least one full scan complete before trying to
  // install new slots. Splitting the code into several tight loops also
  // helpfully optimizes cases A and B.
2200 //
2201 // This code is effectively:
2202 //
2203 // restart:
2204 // // Fastest: receiver is already installed
2205 // for (i = 0; i < receiver_count(); i++) {
2206 // if (receiver(i) == recv) goto found_recv(i);
2207 // }
2208 //
2209 // // Fast: no receiver, but profile is full
2210 // for (i = 0; i < receiver_count(); i++) {
2211 // if (receiver(i) == null) goto found_null(i);
2212 // }
2213 // goto polymorphic
2214 //
2215 // // Slow: try to install receiver
2216 // found_null(i):
2217 // CAS(&receiver(i), null, recv);
2218 // goto restart
2219 //
2220 // polymorphic:
2221 // count++;
2222 // return
2223 //
2224 // found_recv(i):
2225 // *receiver_count(i)++
2226 //
2227
2228 bind(L_restart);
2229
2230 // Fastest: receiver is already installed
2231 mov(offset, base_receiver_offset);
2232 bind(L_loop_search_receiver);
2233 ldr(rscratch1, Address(mdp, offset));
2234 cmp(rscratch1, recv);
2235 br(Assembler::EQ, L_found_recv);
2236 add(offset, offset, receiver_step);
2237 sub(rscratch1, offset, end_receiver_offset);
2238 cbnz(rscratch1, L_loop_search_receiver);
2239
2240 // Fast: no receiver, but profile is full
2241 mov(offset, base_receiver_offset);
2242 bind(L_loop_search_empty);
2243 ldr(rscratch1, Address(mdp, offset));
2244 cbz(rscratch1, L_found_empty);
2245 add(offset, offset, receiver_step);
2246 sub(rscratch1, offset, end_receiver_offset);
2247 cbnz(rscratch1, L_loop_search_empty);
2248 b(L_polymorphic);
2249
2250 // Slow: try to install receiver
2251 bind(L_found_empty);
2252
2253 // Atomically swing receiver slot: null -> recv.
2254 //
2255 // The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
2256 // is used to hold the destination address. This is safe because the
2257 // offset is no longer needed after the address is computed.
2258
2259 lea(rscratch2, Address(mdp, offset));
2260 cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
2261 /*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
2262
2263 // CAS success means the slot now has the receiver we want. CAS failure means
2264 // something had claimed the slot concurrently: it can be the same receiver we want,
2265 // or something else. Since this is a slow path, we can optimize for code density,
2266 // and just restart the search from the beginning.
2267 b(L_restart);
2268
2269 // Counter updates:
2270
2271 // Increment polymorphic counter instead of receiver slot.
2272 bind(L_polymorphic);
2273 mov(offset, poly_count_offset);
2274 b(L_count_update);
2275
2276 // Found a receiver, convert its slot offset to corresponding count offset.
2277 bind(L_found_recv);
2278 add(offset, offset, receiver_to_count_step);
2279
2280 bind(L_count_update);
2281 increment(Address(mdp, offset), DataLayout::counter_increment);
2282 }
2283
2284
2285 void MacroAssembler::call_VM_leaf_base(address entry_point,
2286 int number_of_arguments,
2287 Label *retaddr) {
2288 Label E, L;
2289
2290 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
2291
2292 mov(rscratch1, RuntimeAddress(entry_point));
2293 blr(rscratch1);
2294 if (retaddr)
2295 bind(*retaddr);
2296
2297 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
2298 }
2299
2300 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2301 call_VM_leaf_base(entry_point, number_of_arguments);
2302 }
2303
2304 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2305 pass_arg0(this, arg_0);
2306 call_VM_leaf_base(entry_point, 1);
2307 }
2308
2309 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2310 assert_different_registers(arg_1, c_rarg0);
2311 pass_arg0(this, arg_0);
2312 pass_arg1(this, arg_1);
2313 call_VM_leaf_base(entry_point, 2);
2314 }
2315
2316 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
2317 Register arg_1, Register arg_2) {
2318 assert_different_registers(arg_1, c_rarg0);
2319 assert_different_registers(arg_2, c_rarg0, c_rarg1);
2320 pass_arg0(this, arg_0);
2321 pass_arg1(this, arg_1);
2322 pass_arg2(this, arg_2);
2323 call_VM_leaf_base(entry_point, 3);
2324 }
2325
2326 void MacroAssembler::super_call_VM_leaf(address entry_point) {
2327 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2328 }
2329
2330 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2331 pass_arg0(this, arg_0);
2332 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2333 }
2334
2335 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2336
2337 assert_different_registers(arg_0, c_rarg1);
2338 pass_arg1(this, arg_1);
2339 pass_arg0(this, arg_0);
2340 MacroAssembler::call_VM_leaf_base(entry_point, 2);
2341 }
2342
2343 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2344 assert_different_registers(arg_0, c_rarg1, c_rarg2);
2345 assert_different_registers(arg_1, c_rarg2);
2346 pass_arg2(this, arg_2);
2347 pass_arg1(this, arg_1);
2348 pass_arg0(this, arg_0);
2349 MacroAssembler::call_VM_leaf_base(entry_point, 3);
2350 }
2351
2352 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2353 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
2354 assert_different_registers(arg_1, c_rarg2, c_rarg3);
2355 assert_different_registers(arg_2, c_rarg3);
2356 pass_arg3(this, arg_3);
2357 pass_arg2(this, arg_2);
2358 pass_arg1(this, arg_1);
2359 pass_arg0(this, arg_0);
2360 MacroAssembler::call_VM_leaf_base(entry_point, 4);
2361 }
2362
2363 void MacroAssembler::null_check(Register reg, int offset) {
2364 if (needs_explicit_null_check(offset)) {
2365 // provoke OS null exception if reg is null by
2366 // accessing M[reg] w/o changing any registers
2367 // NOTE: this is plenty to provoke a segv
2368 ldr(zr, Address(reg));
2369 } else {
2370 // nothing to do, (later) access of M[reg + offset]
2371 // will provoke OS null exception if reg is null
2372 }
2373 }
2374
2375 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2376 assert_different_registers(markword, rscratch2);
2377 mov(rscratch2, markWord::inline_type_pattern_mask);
2378 andr(markword, markword, rscratch2);
2379 mov(rscratch2, markWord::inline_type_pattern);
2380 cmp(markword, rscratch2);
2381 br(Assembler::EQ, is_inline_type);
2382 }
2383
2384 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
2385 assert_different_registers(tmp, rscratch1);
2386 if (can_be_null) {
2387 cbz(object, not_inline_type);
2388 }
2389 const int is_inline_type_mask = markWord::inline_type_pattern;
2390 ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2391 mov(rscratch1, is_inline_type_mask);
2392 andr(tmp, tmp, rscratch1);
2393 cmp(tmp, rscratch1);
2394 br(Assembler::NE, not_inline_type);
2395 }
2396
2397 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2398 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2399 tbnz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, is_null_free_inline_type);
2400 }
2401
2402 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2403 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2404 tbz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, not_null_free_inline_type);
2405 }
2406
2407 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2408 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2409 tbnz(flags, ResolvedFieldEntry::is_flat_shift, is_flat);
2410 }
2411
2412 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2413 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2414 tbnz(flags, ResolvedFieldEntry::has_null_marker_shift, has_null_marker);
2415 }
2416
2417 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2418 Label test_mark_word;
2419 // load mark word
2420 ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  // Check whether the mark word is unlocked (not displaced).
  tst(temp_reg, markWord::unlocked_value);
  br(Assembler::NE, test_mark_word);
  // Mark word is displaced: consult the klass prototype header instead.
  load_prototype_header(temp_reg, oop);
2426
2427 bind(test_mark_word);
2428 andr(temp_reg, temp_reg, test_bit);
2429 if (jmp_set) {
2430 cbnz(temp_reg, jmp_label);
2431 } else {
2432 cbz(temp_reg, jmp_label);
2433 }
2434 }
2435
2436 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) {
2437 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2438 }
2439
2440 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
                                             Label& is_non_flat_array) {
2442 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2443 }
2444
2445 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
2446 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2447 }
2448
void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
2450 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2451 }
2452
2453 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2454 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2455 br(Assembler::NE, is_flat_array);
2456 }
2457
2458 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2459 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2460 br(Assembler::EQ, is_non_flat_array);
2461 }
2462
2463 // MacroAssembler protected routines needed to implement
2464 // public methods
2465
2466 void MacroAssembler::mov(Register r, Address dest) {
2467 code_section()->relocate(pc(), dest.rspec());
2468 uint64_t imm64 = (uint64_t)dest.target();
2469 movptr(r, imm64);
2470 }
2471
2472 // Move a constant pointer into r. In AArch64 mode the virtual
2473 // address space is 48 bits in size, so we only need three
2474 // instructions to create a patchable instruction sequence that can
2475 // reach anywhere.
2476 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
2477 #ifndef PRODUCT
2478 {
2479 char buffer[64];
2480 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
2481 block_comment(buffer);
2482 }
2483 #endif
2484 assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
2485 movz(r, imm64 & 0xffff);
2486 imm64 >>= 16;
2487 movk(r, imm64 & 0xffff, 16);
2488 imm64 >>= 16;
2489 movk(r, imm64 & 0xffff, 32);
2490 }
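
// For example (illustrative), movptr(r0, 0x123456789abc) expands to
//
//   movz r0, #0x9abc
//   movk r0, #0x5678, lsl #16
//   movk r0, #0x1234, lsl #32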
2491
2492 // Macro to mov replicated immediate to vector register.
2493 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
2494 // the upper 56/48/32 bits must be zeros for B/H/S type.
2495 // Vd will get the following values for different arrangements in T
2496 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh
2497 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh
2498 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh
2499 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh
2500 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
2501 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
2502 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh
2503 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh
2504 // Clobbers rscratch1
2505 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
2506 assert(T != T1Q, "unsupported");
2507 if (T == T1D || T == T2D) {
2508 int imm = operand_valid_for_movi_immediate(imm64, T);
2509 if (-1 != imm) {
2510 movi(Vd, T, imm);
2511 } else {
2512 mov(rscratch1, imm64);
2513 dup(Vd, T, rscratch1);
2514 }
2515 return;
2516 }
2517
2518 #ifdef ASSERT
2519 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
2520 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
2521 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
2522 #endif
2523 int shift = operand_valid_for_movi_immediate(imm64, T);
2524 uint32_t imm32 = imm64 & 0xffffffffULL;
2525 if (shift >= 0) {
2526 movi(Vd, T, (imm32 >> shift) & 0xff, shift);
2527 } else {
2528 movw(rscratch1, imm32);
2529 dup(Vd, T, rscratch1);
2530 }
2531 }
2532
2533 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
2534 {
2535 #ifndef PRODUCT
2536 {
2537 char buffer[64];
2538 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
2539 block_comment(buffer);
2540 }
2541 #endif
2542 if (operand_valid_for_logical_immediate(false, imm64)) {
2543 orr(dst, zr, imm64);
2544 } else {
2545 // we can use a combination of MOVZ or MOVN with
2546 // MOVK to build up the constant
2547 uint64_t imm_h[4];
2548 int zero_count = 0;
2549 int neg_count = 0;
2550 int i;
2551 for (i = 0; i < 4; i++) {
2552 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
2553 if (imm_h[i] == 0) {
2554 zero_count++;
2555 } else if (imm_h[i] == 0xffffL) {
2556 neg_count++;
2557 }
2558 }
2559 if (zero_count == 4) {
2560 // one MOVZ will do
2561 movz(dst, 0);
2562 } else if (neg_count == 4) {
2563 // one MOVN will do
2564 movn(dst, 0);
2565 } else if (zero_count == 3) {
2566 for (i = 0; i < 4; i++) {
2567 if (imm_h[i] != 0L) {
2568 movz(dst, (uint32_t)imm_h[i], (i << 4));
2569 break;
2570 }
2571 }
2572 } else if (neg_count == 3) {
2573 // one MOVN will do
2574 for (int i = 0; i < 4; i++) {
2575 if (imm_h[i] != 0xffffL) {
2576 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2577 break;
2578 }
2579 }
2580 } else if (zero_count == 2) {
2581 // one MOVZ and one MOVK will do
2582 for (i = 0; i < 3; i++) {
2583 if (imm_h[i] != 0L) {
2584 movz(dst, (uint32_t)imm_h[i], (i << 4));
2585 i++;
2586 break;
2587 }
2588 }
2589 for (;i < 4; i++) {
2590 if (imm_h[i] != 0L) {
2591 movk(dst, (uint32_t)imm_h[i], (i << 4));
2592 }
2593 }
2594 } else if (neg_count == 2) {
2595 // one MOVN and one MOVK will do
2596 for (i = 0; i < 4; i++) {
2597 if (imm_h[i] != 0xffffL) {
2598 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2599 i++;
2600 break;
2601 }
2602 }
2603 for (;i < 4; i++) {
2604 if (imm_h[i] != 0xffffL) {
2605 movk(dst, (uint32_t)imm_h[i], (i << 4));
2606 }
2607 }
2608 } else if (zero_count == 1) {
2609 // one MOVZ and two MOVKs will do
2610 for (i = 0; i < 4; i++) {
2611 if (imm_h[i] != 0L) {
2612 movz(dst, (uint32_t)imm_h[i], (i << 4));
2613 i++;
2614 break;
2615 }
2616 }
2617 for (;i < 4; i++) {
2618 if (imm_h[i] != 0x0L) {
2619 movk(dst, (uint32_t)imm_h[i], (i << 4));
2620 }
2621 }
2622 } else if (neg_count == 1) {
2623 // one MOVN and two MOVKs will do
2624 for (i = 0; i < 4; i++) {
2625 if (imm_h[i] != 0xffffL) {
2626 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2627 i++;
2628 break;
2629 }
2630 }
2631 for (;i < 4; i++) {
2632 if (imm_h[i] != 0xffffL) {
2633 movk(dst, (uint32_t)imm_h[i], (i << 4));
2634 }
2635 }
2636 } else {
2637 // use a MOVZ and 3 MOVKs (makes it easier to debug)
2638 movz(dst, (uint32_t)imm_h[0], 0);
2639 for (i = 1; i < 4; i++) {
2640 movk(dst, (uint32_t)imm_h[i], (i << 4));
2641 }
2642 }
2643 }
2644 }
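
// Worked example (illustrative): imm64 == 0xffffffff00001234 splits into
// imm_h = { 0x1234, 0x0000, 0xffff, 0xffff }, so neg_count == 2 and the
// MOVN path above emits
//
//   movn dst, #0xedcb            // dst = 0xffffffffffff1234
//   movk dst, #0x0000, lsl #16   // dst = 0xffffffff00001234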
2645
2646 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
2647 {
2648 #ifndef PRODUCT
2649 {
2650 char buffer[64];
2651 os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
2652 block_comment(buffer);
2653 }
2654 #endif
2655 if (operand_valid_for_logical_immediate(true, imm32)) {
2656 orrw(dst, zr, imm32);
2657 } else {
2658 // we can use MOVZ, MOVN or two calls to MOVK to build up the
2659 // constant
2660 uint32_t imm_h[2];
2661 imm_h[0] = imm32 & 0xffff;
2662 imm_h[1] = ((imm32 >> 16) & 0xffff);
2663 if (imm_h[0] == 0) {
2664 movzw(dst, imm_h[1], 16);
2665 } else if (imm_h[0] == 0xffff) {
2666 movnw(dst, imm_h[1] ^ 0xffff, 16);
2667 } else if (imm_h[1] == 0) {
2668 movzw(dst, imm_h[0], 0);
2669 } else if (imm_h[1] == 0xffff) {
2670 movnw(dst, imm_h[0] ^ 0xffff, 0);
2671 } else {
2672 // use a MOVZ and MOVK (makes it easier to debug)
2673 movzw(dst, imm_h[0], 0);
2674 movkw(dst, imm_h[1], 16);
2675 }
2676 }
2677 }
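
// For example (illustrative): imm32 == 0x12340000 has imm_h[0] == 0, so a
// single "movzw dst, #0x1234, lsl #16" suffices, while imm32 == 0x1234ffff
// takes the MOVN form "movnw dst, #0xedcb, lsl #16".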
2678
2679 // Form an address from base + offset in Rd. Rd may or may
2680 // not actually be used: you must use the Address that is returned.
2681 // It is up to you to ensure that the shift provided matches the size
2682 // of your data.
2683 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
2684 if (Address::offset_ok_for_immed(byte_offset, shift))
2685 // It fits; no need for any heroics
2686 return Address(base, byte_offset);
2687
2688 // Don't do anything clever with negative or misaligned offsets
2689 unsigned mask = (1 << shift) - 1;
2690 if (byte_offset < 0 || byte_offset & mask) {
2691 mov(Rd, byte_offset);
2692 add(Rd, base, Rd);
2693 return Address(Rd);
2694 }
2695
2696 // See if we can do this with two 12-bit offsets
2697 {
2698 uint64_t word_offset = byte_offset >> shift;
2699 uint64_t masked_offset = word_offset & 0xfff000;
2700 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
2701 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
2702 add(Rd, base, masked_offset << shift);
2703 word_offset -= masked_offset;
2704 return Address(Rd, word_offset << shift);
2705 }
2706 }
2707
2708 // Do it the hard way
2709 mov(Rd, byte_offset);
2710 add(Rd, base, Rd);
2711 return Address(Rd);
2712 }
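
// Worked example (illustrative): form_address(r9, base, 0x123456, 0) cannot
// encode the offset directly, so the two-part path above emits
//
//   add r9, base, #0x123000      // valid add/sub immediate (0x123 << 12)
//
// and returns Address(r9, 0x456), which fits a 12-bit immediate offset.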
2713
2714 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
2715 bool want_remainder, Register scratch)
2716 {
2717 // Full implementation of Java idiv and irem. The function
2718 // returns the (pc) offset of the div instruction - may be needed
2719 // for implicit exceptions.
2720 //
2721 // constraint : ra/rb =/= scratch
2722 // normal case
2723 //
2724 // input : ra: dividend
2725 // rb: divisor
2726 //
2727 // result: either
2728 // quotient (= ra idiv rb)
2729 // remainder (= ra irem rb)
2730
2731 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2732
2733 int idivl_offset = offset();
2734 if (! want_remainder) {
2735 sdivw(result, ra, rb);
2736 } else {
2737 sdivw(scratch, ra, rb);
2738 Assembler::msubw(result, scratch, rb, ra);
2739 }
2740
2741 return idivl_offset;
2742 }
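
// Note: unlike x86, no fixup is needed around SDIV because the AArch64
// instruction already matches Java semantics in the corner cases:
// MIN_VALUE / -1 wraps to MIN_VALUE, and division by zero yields 0 rather
// than trapping (the zero-divisor check that raises ArithmeticException is
// emitted separately).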
2743
2744 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2745 bool want_remainder, Register scratch)
2746 {
2747 // Full implementation of Java ldiv and lrem. The function
2748 // returns the (pc) offset of the div instruction - may be needed
2749 // for implicit exceptions.
2750 //
2751 // constraint : ra/rb =/= scratch
2752 // normal case
2753 //
2754 // input : ra: dividend
2755 // rb: divisor
2756 //
2757 // result: either
2758 // quotient (= ra idiv rb)
2759 // remainder (= ra irem rb)
2760
2761 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2762
2763 int idivq_offset = offset();
2764 if (! want_remainder) {
2765 sdiv(result, ra, rb);
2766 } else {
2767 sdiv(scratch, ra, rb);
2768 Assembler::msub(result, scratch, rb, ra);
2769 }
2770
2771 return idivq_offset;
2772 }
2773
2774 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2775 address prev = pc() - NativeMembar::instruction_size;
2776 address last = code()->last_insn();
2777 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2778 NativeMembar *bar = NativeMembar_at(prev);
2779 if (AlwaysMergeDMB) {
2780 bar->set_kind(bar->get_kind() | order_constraint);
2781 BLOCK_COMMENT("merged membar(always)");
2782 return;
2783 }
2784 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2785 // doing so would introduce a StoreLoad which the caller did not
2786 // intend
2787 if (bar->get_kind() == order_constraint
2788 || bar->get_kind() == AnyAny
2789 || order_constraint == AnyAny) {
2790 // We are merging two memory barrier instructions. On AArch64 we
2791 // can do this simply by ORing them together.
2792 bar->set_kind(bar->get_kind() | order_constraint);
2793 BLOCK_COMMENT("merged membar");
2794 return;
2795 } else {
      // In a special case like "DMB ST; DMB LD; DMB ST" the last DMB can be
      // skipped. We need to check the last two instructions.
2798 address prev2 = prev - NativeMembar::instruction_size;
2799 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2800 NativeMembar *bar2 = NativeMembar_at(prev2);
2801 assert(bar2->get_kind() == order_constraint, "it should be merged before");
2802 BLOCK_COMMENT("merged membar(elided)");
2803 return;
2804 }
2805 }
2806 }
2807 code()->set_last_insn(pc());
2808 dmb(Assembler::barrier(order_constraint));
2809 }
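
// For example (illustrative): two consecutive membar(StoreStore) calls
// produce a single "dmb ishst" -- the second call just ORs its kind into
// the barrier already emitted -- and an AnyAny barrier can absorb (or be
// absorbed by) any adjacent barrier.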
2810
2811 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2812 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2813 merge_ldst(rt, adr, size_in_bytes, is_store);
2814 code()->clear_last_insn();
2815 return true;
2816 } else {
    assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte loads/stores are supported.");
2818 const uint64_t mask = size_in_bytes - 1;
2819 if (adr.getMode() == Address::base_plus_offset &&
2820 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2821 code()->set_last_insn(pc());
2822 }
2823 return false;
2824 }
2825 }
2826
2827 void MacroAssembler::ldr(Register Rx, const Address &adr) {
2828 // We always try to merge two adjacent loads into one ldp.
2829 if (!try_merge_ldst(Rx, adr, 8, false)) {
2830 Assembler::ldr(Rx, adr);
2831 }
2832 }
2833
2834 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2835 // We always try to merge two adjacent loads into one ldp.
2836 if (!try_merge_ldst(Rw, adr, 4, false)) {
2837 Assembler::ldrw(Rw, adr);
2838 }
2839 }
2840
2841 void MacroAssembler::str(Register Rx, const Address &adr) {
2842 // We always try to merge two adjacent stores into one stp.
2843 if (!try_merge_ldst(Rx, adr, 8, true)) {
2844 Assembler::str(Rx, adr);
2845 }
2846 }
2847
2848 void MacroAssembler::strw(Register Rw, const Address &adr) {
2849 // We always try to merge two adjacent stores into one stp.
2850 if (!try_merge_ldst(Rw, adr, 4, true)) {
2851 Assembler::strw(Rw, adr);
2852 }
2853 }
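
// For example (illustrative), the two adjacent stores
//
//   str(r10, Address(sp, 16));
//   str(r11, Address(sp, 24));
//
// are merged: the first str is emitted as usual, then replaced together
// with the second by a single "stp r10, r11, [sp, #16]".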
2854
2855 // MacroAssembler routines found actually to be needed
2856
2857 void MacroAssembler::push(Register src)
2858 {
2859 str(src, Address(pre(esp, -1 * wordSize)));
2860 }
2861
2862 void MacroAssembler::pop(Register dst)
2863 {
2864 ldr(dst, Address(post(esp, 1 * wordSize)));
2865 }
2866
2867 // Note: load_unsigned_short used to be called load_unsigned_word.
2868 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2869 int off = offset();
2870 ldrh(dst, src);
2871 return off;
2872 }
2873
2874 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2875 int off = offset();
2876 ldrb(dst, src);
2877 return off;
2878 }
2879
2880 int MacroAssembler::load_signed_short(Register dst, Address src) {
2881 int off = offset();
2882 ldrsh(dst, src);
2883 return off;
2884 }
2885
2886 int MacroAssembler::load_signed_byte(Register dst, Address src) {
2887 int off = offset();
2888 ldrsb(dst, src);
2889 return off;
2890 }
2891
2892 int MacroAssembler::load_signed_short32(Register dst, Address src) {
2893 int off = offset();
2894 ldrshw(dst, src);
2895 return off;
2896 }
2897
2898 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
2899 int off = offset();
2900 ldrsbw(dst, src);
2901 return off;
2902 }
2903
2904 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2905 switch (size_in_bytes) {
2906 case 8: ldr(dst, src); break;
2907 case 4: ldrw(dst, src); break;
2908 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2909 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2910 default: ShouldNotReachHere();
2911 }
2912 }
2913
2914 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) {
2915 switch (size_in_bytes) {
2916 case 8: str(src, dst); break;
2917 case 4: strw(src, dst); break;
2918 case 2: strh(src, dst); break;
2919 case 1: strb(src, dst); break;
2920 default: ShouldNotReachHere();
2921 }
2922 }
2923
2924 void MacroAssembler::narrow_subword_type(Register reg, BasicType bt) {
2925 assert(is_subword_type(bt), "required");
2926 switch (bt) {
2927 case T_BOOLEAN: andw(reg, reg, 1); break;
2928 case T_BYTE: sxtbw(reg, reg); break;
2929 case T_CHAR: uxthw(reg, reg); break;
2930 case T_SHORT: sxthw(reg, reg); break;
2931 default: ShouldNotReachHere();
2932 }
2933 }
2934
2935 void MacroAssembler::decrementw(Register reg, int value)
2936 {
2937 if (value < 0) { incrementw(reg, -value); return; }
2938 if (value == 0) { return; }
2939 if (value < (1 << 12)) { subw(reg, reg, value); return; }
2940 /* else */ {
2941 guarantee(reg != rscratch2, "invalid dst for register decrement");
2942 movw(rscratch2, (unsigned)value);
2943 subw(reg, reg, rscratch2);
2944 }
2945 }
2946
2947 void MacroAssembler::decrement(Register reg, int value)
2948 {
2949 if (value < 0) { increment(reg, -value); return; }
2950 if (value == 0) { return; }
2951 if (value < (1 << 12)) { sub(reg, reg, value); return; }
2952 /* else */ {
2953 assert(reg != rscratch2, "invalid dst for register decrement");
2954 mov(rscratch2, (uint64_t)value);
2955 sub(reg, reg, rscratch2);
2956 }
2957 }
2958
2959 void MacroAssembler::decrementw(Address dst, int value)
2960 {
2961 assert(!dst.uses(rscratch1), "invalid dst for address decrement");
2962 if (dst.getMode() == Address::literal) {
2963 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2964 lea(rscratch2, dst);
2965 dst = Address(rscratch2);
2966 }
2967 ldrw(rscratch1, dst);
2968 decrementw(rscratch1, value);
2969 strw(rscratch1, dst);
2970 }
2971
2972 void MacroAssembler::decrement(Address dst, int value)
2973 {
2974 assert(!dst.uses(rscratch1), "invalid address for decrement");
2975 if (dst.getMode() == Address::literal) {
2976 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2977 lea(rscratch2, dst);
2978 dst = Address(rscratch2);
2979 }
2980 ldr(rscratch1, dst);
2981 decrement(rscratch1, value);
2982 str(rscratch1, dst);
2983 }
2984
2985 void MacroAssembler::incrementw(Register reg, int value)
2986 {
2987 if (value < 0) { decrementw(reg, -value); return; }
2988 if (value == 0) { return; }
2989 if (value < (1 << 12)) { addw(reg, reg, value); return; }
2990 /* else */ {
2991 assert(reg != rscratch2, "invalid dst for register increment");
2992 movw(rscratch2, (unsigned)value);
2993 addw(reg, reg, rscratch2);
2994 }
2995 }
2996
2997 void MacroAssembler::increment(Register reg, int value)
2998 {
2999 if (value < 0) { decrement(reg, -value); return; }
3000 if (value == 0) { return; }
3001 if (value < (1 << 12)) { add(reg, reg, value); return; }
3002 /* else */ {
3003 assert(reg != rscratch2, "invalid dst for register increment");
3004 movw(rscratch2, (unsigned)value);
3005 add(reg, reg, rscratch2);
3006 }
3007 }
3008
3009 void MacroAssembler::incrementw(Address dst, int value)
3010 {
3011 assert(!dst.uses(rscratch1), "invalid dst for address increment");
3012 if (dst.getMode() == Address::literal) {
3013 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
3014 lea(rscratch2, dst);
3015 dst = Address(rscratch2);
3016 }
3017 ldrw(rscratch1, dst);
3018 incrementw(rscratch1, value);
3019 strw(rscratch1, dst);
3020 }
3021
3022 void MacroAssembler::increment(Address dst, int value)
3023 {
3024 assert(!dst.uses(rscratch1), "invalid dst for address increment");
3025 if (dst.getMode() == Address::literal) {
3026 assert(abs(value) < (1 << 12), "invalid value and address mode combination");
3027 lea(rscratch2, dst);
3028 dst = Address(rscratch2);
3029 }
3030 ldr(rscratch1, dst);
3031 increment(rscratch1, value);
3032 str(rscratch1, dst);
3033 }
3034
3035 // Push lots of registers in the bit set supplied. Don't push sp.
3036 // Return the number of words pushed
3037 int MacroAssembler::push(RegSet regset, Register stack) {
3038 if (regset.bits() == 0) {
3039 return 0;
3040 }
3041 auto bitset = integer_cast<unsigned int>(regset.bits());
3042 int words_pushed = 0;
3043
3044 // Scan bitset to accumulate register pairs
3045 unsigned char regs[32];
3046 int count = 0;
3047 for (int reg = 0; reg <= 30; reg++) {
3048 if (1 & bitset)
3049 regs[count++] = reg;
3050 bitset >>= 1;
3051 }
3052 regs[count++] = zr->raw_encoding();
3053 count &= ~1; // Only push an even number of regs
3054
3055 if (count) {
3056 stp(as_Register(regs[0]), as_Register(regs[1]),
3057 Address(pre(stack, -count * wordSize)));
3058 words_pushed += 2;
3059 }
3060 for (int i = 2; i < count; i += 2) {
3061 stp(as_Register(regs[i]), as_Register(regs[i+1]),
3062 Address(stack, i * wordSize));
3063 words_pushed += 2;
3064 }
3065
3066 assert(words_pushed == count, "oops, pushed != count");
3067
3068 return count;
3069 }
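
// For example (illustrative), push(RegSet::range(r19, r22), sp) emits
//
//   stp r19, r20, [sp, #-32]!
//   stp r21, r22, [sp, #16]
//
// and returns 4. An odd-sized set is padded with zr so the stack stays
// 16-byte aligned.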

int MacroAssembler::pop(RegSet regset, Register stack) {
  if (regset.bits() == 0) {
    return 0;
  }
  auto bitset = integer_cast<unsigned int>(regset.bits());
  int words_popped = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->raw_encoding();
  count &= ~1;

  for (int i = 2; i < count; i += 2) {
    ldp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_popped += 2;
  }
  if (count) {
    ldp(as_Register(regs[0]), as_Register(regs[1]),
        Address(post(stack, count * wordSize)));
    words_popped += 2;
  }

  assert(words_popped == count, "oops, popped != count");

  return count;
}

// Push lots of registers in the bit set supplied.  Don't push sp.
// Return the number of dwords pushed
int MacroAssembler::push_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) {
  if (regset.bits() == 0) {
    return 0;
  }
  auto bitset = integer_cast<unsigned int>(regset.bits());
  int words_pushed = 0;
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
#endif

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 31; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }

  if (count == 0) {
    return 0;
  }

  if (mode == PushPopFull) {
    if (use_sve && sve_vector_size_in_bytes > 16) {
      mode = PushPopSVE;
    } else {
      mode = PushPopNeon;
    }
  }

#ifndef PRODUCT
  {
    char buffer[48];
    if (mode == PushPopSVE) {
      os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
    } else if (mode == PushPopNeon) {
      os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
    } else {
      os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
    }
    block_comment(buffer);
  }
#endif

  if (mode == PushPopSVE) {
    sub(stack, stack, sve_vector_size_in_bytes * count);
    for (int i = 0; i < count; i++) {
      sve_str(as_FloatRegister(regs[i]), Address(stack, i));
    }
    return count * sve_vector_size_in_bytes / 8;
  }

  if (mode == PushPopNeon) {
    if (count == 1) {
      strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
      return 2;
    }

    bool odd = (count & 1) == 1;
    int push_slots = count + (odd ? 1 : 0);

    // Always pushing full 128 bit registers.
    stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
    words_pushed += 2;

    for (int i = 2; i + 1 < count; i += 2) {
      stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
      words_pushed += 2;
    }

    if (odd) {
      strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
      words_pushed++;
    }

    assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
    return count * 2;
  }

  if (mode == PushPopFp) {
    bool odd = (count & 1) == 1;
    int push_slots = count + (odd ? 1 : 0);

    if (count == 1) {
      // Stack pointer must be 16 bytes aligned
      strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize)));
      return 1;
    }

    stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize)));
    words_pushed += 2;

    for (int i = 2; i + 1 < count; i += 2) {
      stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
      words_pushed += 2;
    }

    if (odd) {
      // Stack pointer must be 16 bytes aligned
      strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
      words_pushed++;
    }

    assert(words_pushed == count, "oops, pushed != count");

    return count;
  }

  return 0;
}
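
// Mode selection sketch (illustrative): PushPopFull defers the layout
// decision to runtime -- whole scalable vectors are saved when SVE is
// available with a vector length above 16 bytes, otherwise full 128-bit
// Neon quadwords. A caller that only needs the low 64 bits of each
// register could ask for PushPopFp explicitly:
//
//   __ push_fp(FloatRegSet::of(v8, v9), sp, MacroAssembler::PushPopFp);
//   // ...
//   __ pop_fp(FloatRegSet::of(v8, v9), sp, MacroAssembler::PushPopFp);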

// Return the number of dwords popped
int MacroAssembler::pop_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) {
  if (regset.bits() == 0) {
    return 0;
  }
  auto bitset = integer_cast<unsigned int>(regset.bits());
  int words_popped = 0;
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
#endif
  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 31; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }

  if (count == 0) {
    return 0;
  }

  if (mode == PushPopFull) {
    if (use_sve && sve_vector_size_in_bytes > 16) {
      mode = PushPopSVE;
    } else {
      mode = PushPopNeon;
    }
  }

#ifndef PRODUCT
  {
    char buffer[48];
    if (mode == PushPopSVE) {
      os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
    } else if (mode == PushPopNeon) {
      os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
    } else {
      os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
    }
    block_comment(buffer);
  }
#endif

  if (mode == PushPopSVE) {
    for (int i = count - 1; i >= 0; i--) {
      sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
    }
    add(stack, stack, sve_vector_size_in_bytes * count);
    return count * sve_vector_size_in_bytes / 8;
  }

  if (mode == PushPopNeon) {
    if (count == 1) {
      ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
      return 2;
    }

    bool odd = (count & 1) == 1;
    int pop_slots = count + (odd ? 1 : 0);

    if (odd) {
      ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
      words_popped++;
    }

    for (int i = 2; i + 1 < count; i += 2) {
      ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
      words_popped += 2;
    }

    ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, pop_slots * wordSize * 2)));
    words_popped += 2;

    assert(words_popped == count, "oops, popped(%d) != count(%d)", words_popped, count);

    return count * 2;
  }

  if (mode == PushPopFp) {
    bool odd = (count & 1) == 1;
    int pop_slots = count + (odd ? 1 : 0);

    if (count == 1) {
      ldrd(as_FloatRegister(regs[0]), Address(post(stack, pop_slots * wordSize)));
      return 1;
    }

    if (odd) {
      ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
      words_popped++;
    }

    for (int i = 2; i + 1 < count; i += 2) {
      ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
      words_popped += 2;
    }

    ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, pop_slots * wordSize)));
    words_popped += 2;

    assert(words_popped == count, "oops, popped != count");

    return count;
  }

  return 0;
}

// Return the number of dwords pushed
int MacroAssembler::push_p(PRegSet regset, Register stack) {
  if (regset.bits() == 0) {
    return 0;
  }
  auto bitset = integer_cast<unsigned int>(regset.bits());
  bool use_sve = false;
  int sve_predicate_size_in_slots = 0;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

  if (!use_sve) {
    return 0;
  }

  unsigned char regs[PRegister::number_of_registers];
  int count = 0;
  for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }

  if (count == 0) {
    return 0;
  }

  int total_push_bytes = align_up(sve_predicate_size_in_slots *
                                  VMRegImpl::stack_slot_size * count, 16);
  sub(stack, stack, total_push_bytes);
  for (int i = 0; i < count; i++) {
    sve_str(as_PRegister(regs[i]), Address(stack, i));
  }
  return total_push_bytes / 8;
}

// Return the number of dwords popped
int MacroAssembler::pop_p(PRegSet regset, Register stack) {
  if (regset.bits() == 0) {
    return 0;
  }
  auto bitset = integer_cast<unsigned int>(regset.bits());
  bool use_sve = false;
  int sve_predicate_size_in_slots = 0;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

  if (!use_sve) {
    return 0;
  }

  unsigned char regs[PRegister::number_of_registers];
  int count = 0;
  for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }

  if (count == 0) {
    return 0;
  }

  int total_pop_bytes = align_up(sve_predicate_size_in_slots *
                                 VMRegImpl::stack_slot_size * count, 16);
  for (int i = count - 1; i >= 0; i--) {
    sve_ldr(as_PRegister(regs[i]), Address(stack, i));
  }
  add(stack, stack, total_pop_bytes);
  return total_pop_bytes / 8;
}
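
// Note (illustrative pairing, not from the original code): push_p/pop_p
// emit nothing and return 0 unless SVE is available, so callers can pair
// them unconditionally:
//
//   int pushed = __ push_p(PRegSet::range(p0, p7), sp);  // 0 on Neon-only
//   // ...
//   int popped = __ pop_p(PRegSet::range(p0, p7), sp);
//   assert(pushed == popped, "must balance");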

#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
    // rheapbase is allocated as general register
    return;
  }
  if (CheckCompressedOops) {
    Label ok;
    push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
    cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr()));
    br(Assembler::EQ, ok);
    stop(msg);
    bind(ok);
    pop(1 << rscratch1->encoding(), sp);
  }
#endif
}
#endif

void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
  assert_different_registers(value, tmp1, tmp2);
  Label done, tagged, weak_tagged;

  cbz(value, done);                 // Use null as-is.
  tst(value, JNIHandles::tag_mask); // Test for tag.
  br(Assembler::NE, tagged);

  // Resolve local handle
  access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
  verify_oop(value);
  b(done);

  bind(tagged);
  STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
  tbnz(value, 0, weak_tagged);      // Test for weak tag.

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);
  b(done);

  bind(weak_tagged);
  // Resolve jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}
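
// Worked example of the tag dispatch above (tag values per the
// STATIC_ASSERTs: weak_global == 0b01, global == 0b10, local untagged):
// a jobject whose low two bits are 10 takes the "tagged" path; bit 0 is
// clear, so it is a global handle and the oop is loaded from
// (value - 0b10) -- the subtraction strips the tag before the dereference.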

void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) {
  assert_different_registers(value, tmp1, tmp2);
  Label done;

  cbz(value, done);           // Use null as-is.

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  // Skip AOT caching C strings in scratch buffer.
  const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
  BLOCK_COMMENT(str);
  // load msg into r0 so we can access it from the signal handler
  // ExternalAddress enables saving and restoring via the code cache
  lea(c_rarg0, ExternalAddress((address) str));
  dcps1(0xdeae);
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif
}

// If a constant does not fit in an immediate field, generate some
// number of MOV instructions and then perform the operation.
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
                                           add_sub_imm_insn insn1,
                                           add_sub_reg_insn insn2,
                                           bool is32) {
  assert(Rd != zr, "Rd = zr and not setting flags?");
  bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
  if (fits) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    if (g_uabs(imm) < (1 << 24)) {
      (this->*insn1)(Rd, Rn, imm & -(1 << 12));
      (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
    } else {
      assert_different_registers(Rd, Rn);
      mov(Rd, imm);
      (this->*insn2)(Rd, Rn, Rd, LSL, 0);
    }
  }
}

// Separate version which sets the flags. Optimisations are more restricted
// because we must set the flags correctly.
void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
                                             add_sub_imm_insn insn1,
                                             add_sub_reg_insn insn2,
                                             bool is32) {
  bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
  if (fits) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    assert_different_registers(Rd, Rn);
    assert(Rd != zr, "overflow in immediate operand");
    mov(Rd, imm);
    (this->*insn2)(Rd, Rn, Rd, LSL, 0);
  }
}
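
// Worked example of the two-instruction split above: for imm = 0x123456
// (too wide for one 12-bit immediate but below 1 << 24),
//   imm & -(1 << 12)    == 0x123000   // first add/sub, shifted immediate
//   imm & ((1 << 12)-1) == 0x000456   // second add/sub, low immediate
// so no scratch register and no mov sequence are needed.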

void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    add(Rd, Rn, increment.as_register());
  } else {
    add(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    addw(Rd, Rn, increment.as_register());
  } else {
    addw(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// A generic CAS; success or failure is in the EQ flag.  A weak CAS
// doesn't retry and may fail spuriously.  If the oldval is wanted,
// pass a register for the result; otherwise pass noreg.

// Clobbers rscratch1
void MacroAssembler::cmpxchg(Register addr, Register expected,
                             Register new_val,
                             enum operand_size size,
                             bool acquire, bool release,
                             bool weak,
                             Register result) {
  if (result == noreg) result = rscratch1;
  BLOCK_COMMENT("cmpxchg {");
  if (UseLSE) {
    mov(result, expected);
    lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
    compare_eq(result, expected, size);
#ifdef ASSERT
    // Poison rscratch1 which is written on !UseLSE branch
    mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
#endif
  } else {
    Label retry_load, done;
    prfm(Address(addr), PSTL1STRM);
    bind(retry_load);
    load_exclusive(result, addr, size, acquire);
    compare_eq(result, expected, size);
    br(Assembler::NE, done);
    store_exclusive(rscratch1, new_val, addr, size, release);
    if (weak) {
      cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
    } else {
      cbnzw(rscratch1, retry_load);
    }
    bind(done);
  }
  BLOCK_COMMENT("} cmpxchg");
}
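
// Caller sketch (illustrative, not from the original code); the outcome of
// the CAS is read from the flags:
//
//   __ cmpxchg(obj_addr, expected, new_val, Assembler::xword,
//              /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg);
//   __ br(Assembler::NE, cas_failed);   // NE => memory held another value
//
// With UseLSE this folds to a single CAS instruction; otherwise it is a
// load-/store-exclusive retry loop, which is why rscratch1 is clobbered.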

// A generic comparison.  Only compares for equality, clobbers rscratch1.
void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
  if (size == xword) {
    cmp(rm, rn);
  } else if (size == word) {
    cmpw(rm, rn);
  } else if (size == halfword) {
    eorw(rscratch1, rm, rn);
    ands(zr, rscratch1, 0xffff);
  } else if (size == byte) {
    eorw(rscratch1, rm, rn);
    ands(zr, rscratch1, 0xff);
  } else {
    ShouldNotReachHere();
  }
}


static bool different(Register a, RegisterOrConstant b, Register c) {
  if (b.is_constant())
    return a != c;
  else
    return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    if (incr.is_register()) {                                           \
      AOP(sz, incr.as_register(), prev, addr);                          \
    } else {                                                            \
      mov(rscratch2, incr.as_constant());                               \
      AOP(sz, rscratch2, prev, addr);                                   \
    }                                                                   \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  prfm(Address(addr), PSTL1STRM);                                       \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  OP(rscratch1, result, incr);                                          \
  STXR(rscratch2, rscratch1, addr);                                     \
  cbnzw(rscratch2, retry_load);                                         \
  if (prev->is_valid() && prev != result) {                             \
    IOP(prev, rscratch1, incr);                                         \
  }                                                                     \
}

ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)

#undef ATOMIC_OP
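
// For reference, ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr,
// Assembler::xword) above expands to atomic_add(prev, incr, addr): an LSE
// ldadd when available, otherwise an ldxr/add/stxr retry loop. The inverse
// op IOP (sub here) reconstructs the pre-value from the stored sum when
// 'prev' overlapped incr or addr and rscratch2 had to hold the loop value.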

#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    AOP(sz, newv, prev, addr);                                          \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  prfm(Address(addr), PSTL1STRM);                                       \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  STXR(rscratch1, newv, addr);                                          \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)

#undef ATOMIC_XCHG

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr(" pc = 0x%016" PRIx64, pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
      tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
      tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
      tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
      tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
      tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
      tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
      tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
      tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
      tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
      tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
      tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
      tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
      tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
      tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
      tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
      tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
      tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
      tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
      tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
      tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
      tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
      tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
      tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
      tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
      tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
      tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
      tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
      tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
      tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
      tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
#ifndef R18_RESERVED
  regs += r18_tls;
#endif
  return regs;
}

void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
  int step = 4 * wordSize;
  push(call_clobbered_gp_registers() - exclude, sp);
  sub(sp, sp, step);
  mov(rscratch1, -step);
  // Push v0-v7, v16-v31.
  for (int i = 31; i >= 4; i -= 4) {
    if (i <= v7->encoding() || i >= v16->encoding())
      st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
          as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
  }
  st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
      as_FloatRegister(3), T1D, Address(sp));
}

void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
  for (int i = 0; i < 32; i += 4) {
    if (i <= v7->encoding() || i >= v16->encoding())
      ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
  }

  reinitialize_ptrue();

  pop(call_clobbered_gp_registers() - exclude, sp);
}

void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
                                    int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
  push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
  if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
    sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
    for (int i = 0; i < FloatRegister::number_of_registers; i++) {
      sve_str(as_FloatRegister(i), Address(sp, i));
    }
  } else {
    int step = (save_vectors ? 8 : 4) * wordSize;
    mov(rscratch1, -step);
    sub(sp, sp, step);
    for (int i = 28; i >= 4; i -= 4) {
      st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
    }
    st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
  }
  if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
    sub(sp, sp, total_predicate_in_bytes);
    for (int i = 0; i < PRegister::number_of_registers; i++) {
      sve_str(as_PRegister(i), Address(sp, i));
    }
  }
}

void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
                                   int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
  if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
    for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
      sve_ldr(as_PRegister(i), Address(sp, i));
    }
    add(sp, sp, total_predicate_in_bytes);
  }
  if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
    for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
      sve_ldr(as_FloatRegister(i), Address(sp, i));
    }
    add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
  } else {
    int step = (restore_vectors ? 8 : 4) * wordSize;
    for (int i = 0; i <= 28; i += 4)
      ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
  }

  // We may use predicate registers and rely on ptrue with SVE,
  // regardless of whether wide vectors (> 8 bytes) are used or not.
  if (use_sve) {
    reinitialize_ptrue();
  }

  // integer registers except lr & sp
  pop(RegSet::range(r0, r17), sp);
#ifdef R18_RESERVED
  ldp(zr, r19, Address(post(sp, 2 * wordSize)));
  pop(RegSet::range(r20, r29), sp);
#else
  pop(RegSet::range(r18_tls, r29), sp);
#endif
}

/**
 * Helpers for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}
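
// In 128-bit terms the helper above computes
//   final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2
// e.g. 0x1:0xFFFFFFFFFFFFFFFF + 1 + 2 == 0x2:0x0000000000000002; each
// adds/adc pair folds one addend's carry-out into the high word.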

// Generate an address from (r + r1 extend offset).  "size" is the
// size of the operand.  The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1u<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}
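
// Worked example: spilling an 8-byte value at offset 0x48010. The offset
// is 8-aligned, so the first fixup is skipped; 0x48010 >= (1 << 12) * 8,
// so the second fires: tmp = sp + 0x48000 and the returned address is
// (tmp, 0x10), which fits the scaled 12-bit unsigned form.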

Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
  assert(offset >= 0, "spill to negative address?");

  Register base = sp;

  // An immediate offset in the range 0 to 255 which is multiplied
  // by the current vector or predicate register size in bytes.
  if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
    return Address(base, offset / sve_reg_size_in_bytes);
  }

  add(tmp, base, offset);
  return Address(tmp);
}

// Checks whether the offset is aligned.
// Returns true if it is, else false.
bool MacroAssembler::merge_alignment_check(Register base,
                                           size_t size,
                                           int64_t cur_offset,
                                           int64_t prev_offset) const {
  if (AvoidUnalignedAccesses) {
    if (base == sp) {
      // Checks whether the low offset is aligned to a pair of registers.
      int64_t pair_mask = size * 2 - 1;
      int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
      return (offset & pair_mask) == 0;
    } else { // If base is not sp, we can't guarantee the access is aligned.
      return false;
    }
  } else {
    int64_t mask = size - 1;
    // Load/store pair instruction only supports element size aligned offset.
    return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
  }
}

// Checks whether current and previous loads/stores can be merged.
// Returns true if they can be merged, else false.
bool MacroAssembler::ldst_can_merge(Register rt,
                                    const Address &adr,
                                    size_t cur_size_in_bytes,
                                    bool is_store) const {
  address prev = pc() - NativeInstruction::instruction_size;
  address last = code()->last_insn();

  if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
    return false;
  }

  if (adr.getMode() != Address::base_plus_offset || prev != last) {
    return false;
  }

  NativeLdSt* prev_ldst = NativeLdSt_at(prev);
  size_t prev_size_in_bytes = prev_ldst->size_in_bytes();

  assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
  assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");

  if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
    return false;
  }

  int64_t max_offset = 63 * prev_size_in_bytes;
  int64_t min_offset = -64 * prev_size_in_bytes;

  assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");

  // Only the same base can be merged.
  if (adr.base() != prev_ldst->base()) {
    return false;
  }

  int64_t cur_offset = adr.offset();
  int64_t prev_offset = prev_ldst->offset();
  size_t diff = abs(cur_offset - prev_offset);
  if (diff != prev_size_in_bytes) {
    return false;
  }

  // The following cases cannot be merged:
  //   ldr x2, [x2, #8]
  //   ldr x3, [x2, #16]
  // or:
  //   ldr x2, [x3, #8]
  //   ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
  if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
    return false;
  }

  int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
  // Offset range must be in ldp/stp instruction's range.
  if (low_offset > max_offset || low_offset < min_offset) {
    return false;
  }

  if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
    return true;
  }

  return false;
}
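
// Example of a pair the checks above accept (same base and size, offsets
// exactly one slot apart, base not overwritten):
//   str x10, [sp, #16]
//   str x11, [sp, #24]   // merged with the previous str into
//   stp x10, x11, [sp, #16]
// whereas "ldr x2, [x3, #8]; ldr x2, [x3, #16]" is rejected: both loads
// target x2, and ldp with t1 == t2 is the SIGILL case noted above.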

// Merge current load/store with previous load/store into ldp/stp.
void MacroAssembler::merge_ldst(Register rt,
                                const Address &adr,
                                size_t cur_size_in_bytes,
                                bool is_store) {

  assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store), "cur and prev must be able to be merged.");

  Register rt_low, rt_high;
  address prev = pc() - NativeInstruction::instruction_size;
  NativeLdSt* prev_ldst = NativeLdSt_at(prev);

  int64_t offset;

  if (adr.offset() < prev_ldst->offset()) {
    offset = adr.offset();
    rt_low = rt;
    rt_high = prev_ldst->target();
  } else {
    offset = prev_ldst->offset();
    rt_low = prev_ldst->target();
    rt_high = rt;
  }

  Address adr_p = Address(prev_ldst->base(), offset);
  // Overwrite previous generated binary.
  code_section()->set_end(prev);

  const size_t sz = prev_ldst->size_in_bytes();
  assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
  if (!is_store) {
    BLOCK_COMMENT("merged ldr pair");
    if (sz == 8) {
      ldp(rt_low, rt_high, adr_p);
    } else {
      ldpw(rt_low, rt_high, adr_p);
    }
  } else {
    BLOCK_COMMENT("merged str pair");
    if (sz == 8) {
      stp(rt_low, rt_high, adr_p);
    } else {
      stpw(rt_low, rt_high, adr_p);
    }
  }
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions.  Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
                                             Register carry, Register carry2,
                                             Register idx, Register jdx,
                                             Register yz_idx1, Register yz_idx2,
                                             Register tmp, Register tmp3, Register tmp4,
                                             Register tmp6, Register product_hi) {

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx]   * product_hi) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  lsrw(jdx, idx, 2);

  bind(L_third_loop);

  subsw(jdx, jdx, 1);
  br(Assembler::MI, L_third_loop_exit);
  subw(idx, idx, 4);

  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));

  ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));

  lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));

  ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  ror(yz_idx2, yz_idx2, 32);

  ldp(rscratch2, rscratch1, Address(tmp6, 0));

  mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
  umulh(tmp4, product_hi, yz_idx1);

  ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
  ror(rscratch2, rscratch2, 32);

  mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
  umulh(carry2, product_hi, yz_idx2);

  // propagate sum of both multiplications into carry:tmp4:tmp3
  adds(tmp3, tmp3, carry);
  adc(tmp4, tmp4, zr);
  adds(tmp3, tmp3, rscratch1);
  adcs(tmp4, tmp4, tmp);
  adc(carry, carry2, zr);
  adds(tmp4, tmp4, rscratch2);
  adc(carry, carry, zr);

  ror(tmp3, tmp3, 32); // convert little-endian to big-endian
  ror(tmp4, tmp4, 32);
  stp(tmp4, tmp3, Address(tmp6, 0));

  b(L_third_loop);
  bind(L_third_loop_exit);

  andw(idx, idx, 0x3);
  cbz(idx, L_post_third_loop_done);

  Label L_check_1;
  subsw(idx, idx, 2);
  br(Assembler::MI, L_check_1);

  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(yz_idx1, Address(rscratch1, 0));
  ror(yz_idx1, yz_idx1, 32);
  mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
  umulh(tmp4, product_hi, yz_idx1);
  lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
  ldr(yz_idx2, Address(rscratch1, 0));
  ror(yz_idx2, yz_idx2, 32);

  add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);

  ror(tmp3, tmp3, 32);
  str(tmp3, Address(rscratch1, 0));

  bind(L_check_1);

  andw(idx, idx, 0x1);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_post_third_loop_done);
  ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
  umulh(carry2, tmp4, product_hi);
  ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));

  add2_with_carry(carry2, tmp3, tmp4, carry);

  strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: tmp0
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);       // idx = ylen;
  addw(kdx, xlen, ylen); // kdx = xlen+ylen;
  mov(carry, zr);        // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi

  const Register jdx = tmp1;

  bind(L_second_loop);
  mov(carry, zr);  // carry = 0;
  movw(jdx, ylen); // j = ystart+1

  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_done);

  str(z, Address(pre(sp, -4 * wordSize)));

  Label L_last_x;
  lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_last_x);

  lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
  ldr(product_hi, Address(rscratch1));
  ror(product_hi, product_hi, 32); // convert big-endian to little-endian

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  str(ylen, Address(sp, wordSize));
  stp(x, xstart, Address(sp, 2 * wordSize));
  multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
                          tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
  ldp(z, ylen, Address(post(sp, 2 * wordSize)));
  ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen

  addw(tmp3, xlen, 1);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  subsw(tmp3, tmp3, 1);
  br(Assembler::MI, L_done);

  lsr(carry, carry, 32);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  b(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  ldrw(product_hi, Address(x, 0));
  b(L_third_loop_prologue);

  bind(L_done);
}

// Code for BigInteger::mulAdd intrinsic
// out = r0
// in = r1
// offset = r2 (already out.length-offset)
// len = r3
// k = r4
//
// pseudo code from java implementation:
// carry = 0;
// offset = out.length-offset - 1;
// for (int j=len-1; j >= 0; j--) {
//   product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
//   out[offset--] = (int)product;
//   carry = product >>> 32;
// }
// return (int)carry;
void MacroAssembler::mul_add(Register out, Register in, Register offset,
                             Register len, Register k) {
  Label LOOP, END;
  // pre-loop
  cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => fewer branches
  csel(out, zr, out, Assembler::EQ);
  br(Assembler::EQ, END);
  add(in, in, len, LSL, 2);         // in[j+1] address
  add(offset, out, offset, LSL, 2); // out[offset + 1] address
  mov(out, zr); // used to keep carry now
  BIND(LOOP);
  ldrw(rscratch1, Address(pre(in, -4)));
  madd(rscratch1, rscratch1, k, out);
  ldrw(rscratch2, Address(pre(offset, -4)));
  add(rscratch1, rscratch1, rscratch2);
  strw(rscratch1, Address(offset));
  lsr(out, rscratch1, 32);
  subs(len, len, 1);
  br(Assembler::NE, LOOP);
  BIND(END);
}

/**
 * Emits code to update CRC-32 with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 *
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  eor(val, val, crc);
  andr(val, val, 0xff);
  ldrw(val, Address(table, val, Address::lsl(2)));
  eor(crc, val, crc, Assembler::LSR, 8);
}
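
// One concrete step of the table-driven update above, for
// crc = 0xFFFFFFFF and input byte val = 0x00:
//   index = (0x00 ^ 0xFFFFFFFF) & 0xFF = 0xFF
//   crc   = crc_table[0xFF] ^ (0xFFFFFFFF >> 8)
// One input byte is retired per lookup; the word variant below chains four
// lookups through tables 0-3 to retire 32 bits at a time.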
4460
4461 /**
4462 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
4463 *
4464 * @param [in,out]crc Register containing the crc.
4465 * @param [in]v Register containing the 32-bit to fold into the CRC.
4466 * @param [in]table0 Register containing table 0 of crc constants.
4467 * @param [in]table1 Register containing table 1 of crc constants.
4468 * @param [in]table2 Register containing table 2 of crc constants.
4469 * @param [in]table3 Register containing table 3 of crc constants.
4470 *
4471 * uint32_t crc;
4472 * v = crc ^ v
4473 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
4474 *
4475 */
4476 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
4477 Register table0, Register table1, Register table2, Register table3,
4478 bool upper) {
4479 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
4480 uxtb(tmp, v);
4481 ldrw(crc, Address(table3, tmp, Address::lsl(2)));
4482 ubfx(tmp, v, 8, 8);
4483 ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
4484 eor(crc, crc, tmp);
4485 ubfx(tmp, v, 16, 8);
4486 ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
4487 eor(crc, crc, tmp);
4488 ubfx(tmp, v, 24, 8);
4489 ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
4490 eor(crc, crc, tmp);
4491 }
4492
4493 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
4494 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4495 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4496 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4497
4498 subs(tmp0, len, 384);
4499 mvnw(crc, crc);
4500 br(Assembler::GE, CRC_by128_pre);
4501 BIND(CRC_less128);
4502 subs(len, len, 32);
4503 br(Assembler::GE, CRC_by32_loop);
4504 BIND(CRC_less32);
4505 adds(len, len, 32 - 4);
4506 br(Assembler::GE, CRC_by4_loop);
4507 adds(len, len, 4);
4508 br(Assembler::GT, CRC_by1_loop);
4509 b(L_exit);
4510
4511 BIND(CRC_by32_loop);
4512 ldp(tmp0, tmp1, Address(buf));
4513 crc32x(crc, crc, tmp0);
4514 ldp(tmp2, tmp3, Address(buf, 16));
4515 crc32x(crc, crc, tmp1);
4516 add(buf, buf, 32);
4517 crc32x(crc, crc, tmp2);
4518 subs(len, len, 32);
4519 crc32x(crc, crc, tmp3);
4520 br(Assembler::GE, CRC_by32_loop);
4521 cmn(len, (u1)32);
4522 br(Assembler::NE, CRC_less32);
4523 b(L_exit);
4524
4525 BIND(CRC_by4_loop);
4526 ldrw(tmp0, Address(post(buf, 4)));
4527 subs(len, len, 4);
4528 crc32w(crc, crc, tmp0);
4529 br(Assembler::GE, CRC_by4_loop);
4530 adds(len, len, 4);
4531 br(Assembler::LE, L_exit);
4532 BIND(CRC_by1_loop);
4533 ldrb(tmp0, Address(post(buf, 1)));
4534 subs(len, len, 1);
4535 crc32b(crc, crc, tmp0);
4536 br(Assembler::GT, CRC_by1_loop);
4537 b(L_exit);
4538
4539 BIND(CRC_by128_pre);
4540 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4541 4*256*sizeof(juint) + 8*sizeof(juint));
4542 mov(crc, 0);
4543 crc32x(crc, crc, tmp0);
4544 crc32x(crc, crc, tmp1);
4545
4546 cbnz(len, CRC_less128);
4547
4548 BIND(L_exit);
4549 mvnw(crc, crc);
4550 }
4551
4552 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
4553 Register len, Register tmp0, Register tmp1, Register tmp2,
4554 Register tmp3) {
4555 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4556 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4557
4558 mvnw(crc, crc);
4559
4560 subs(len, len, 128);
4561 br(Assembler::GE, CRC_by64_pre);
4562 BIND(CRC_less64);
4563 adds(len, len, 128-32);
4564 br(Assembler::GE, CRC_by32_loop);
4565 BIND(CRC_less32);
4566 adds(len, len, 32-4);
4567 br(Assembler::GE, CRC_by4_loop);
4568 adds(len, len, 4);
4569 br(Assembler::GT, CRC_by1_loop);
4570 b(L_exit);
4571
4572 BIND(CRC_by32_loop);
4573 ldp(tmp0, tmp1, Address(post(buf, 16)));
4574 subs(len, len, 32);
4575 crc32x(crc, crc, tmp0);
4576 ldr(tmp2, Address(post(buf, 8)));
4577 crc32x(crc, crc, tmp1);
4578 ldr(tmp3, Address(post(buf, 8)));
4579 crc32x(crc, crc, tmp2);
4580 crc32x(crc, crc, tmp3);
4581 br(Assembler::GE, CRC_by32_loop);
4582 cmn(len, (u1)32);
4583 br(Assembler::NE, CRC_less32);
4584 b(L_exit);
4585
4586 BIND(CRC_by4_loop);
4587 ldrw(tmp0, Address(post(buf, 4)));
4588 subs(len, len, 4);
4589 crc32w(crc, crc, tmp0);
4590 br(Assembler::GE, CRC_by4_loop);
4591 adds(len, len, 4);
4592 br(Assembler::LE, L_exit);
4593 BIND(CRC_by1_loop);
4594 ldrb(tmp0, Address(post(buf, 1)));
4595 subs(len, len, 1);
4596 crc32b(crc, crc, tmp0);
4597 br(Assembler::GT, CRC_by1_loop);
4598 b(L_exit);
4599
4600 BIND(CRC_by64_pre);
4601 sub(buf, buf, 8);
4602 ldp(tmp0, tmp1, Address(buf, 8));
4603 crc32x(crc, crc, tmp0);
4604 ldr(tmp2, Address(buf, 24));
4605 crc32x(crc, crc, tmp1);
4606 ldr(tmp3, Address(buf, 32));
4607 crc32x(crc, crc, tmp2);
4608 ldr(tmp0, Address(buf, 40));
4609 crc32x(crc, crc, tmp3);
4610 ldr(tmp1, Address(buf, 48));
4611 crc32x(crc, crc, tmp0);
4612 ldr(tmp2, Address(buf, 56));
4613 crc32x(crc, crc, tmp1);
4614 ldr(tmp3, Address(pre(buf, 64)));
4615
4616 b(CRC_by64_loop);
4617
4618 align(CodeEntryAlignment);
4619 BIND(CRC_by64_loop);
4620 subs(len, len, 64);
4621 crc32x(crc, crc, tmp2);
4622 ldr(tmp0, Address(buf, 8));
4623 crc32x(crc, crc, tmp3);
4624 ldr(tmp1, Address(buf, 16));
4625 crc32x(crc, crc, tmp0);
4626 ldr(tmp2, Address(buf, 24));
4627 crc32x(crc, crc, tmp1);
4628 ldr(tmp3, Address(buf, 32));
4629 crc32x(crc, crc, tmp2);
4630 ldr(tmp0, Address(buf, 40));
4631 crc32x(crc, crc, tmp3);
4632 ldr(tmp1, Address(buf, 48));
4633 crc32x(crc, crc, tmp0);
4634 ldr(tmp2, Address(buf, 56));
4635 crc32x(crc, crc, tmp1);
4636 ldr(tmp3, Address(pre(buf, 64)));
4637 br(Assembler::GE, CRC_by64_loop);
4638
4639 // post-loop
4640 crc32x(crc, crc, tmp2);
4641 crc32x(crc, crc, tmp3);
4642
4643 sub(len, len, 64);
4644 add(buf, buf, 8);
4645 cmn(len, (u1)128);
4646 br(Assembler::NE, CRC_less64);
4647 BIND(L_exit);
4648 mvnw(crc, crc);
4649 }
4650
4651 /**
4652 * @param crc register containing existing CRC (32-bit)
4653 * @param buf register pointing to input byte buffer (byte*)
4654 * @param len register containing number of bytes
4655 * @param table register that will contain address of CRC table
4656 * @param tmp scratch register
4657 */
4658 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
4659 Register table0, Register table1, Register table2, Register table3,
4660 Register tmp, Register tmp2, Register tmp3) {
4661 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
4662
4663 if (UseCryptoPmullForCRC32) {
4664 kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4665 return;
4666 }
4667
4668 if (UseCRC32) {
4669 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
4670 return;
4671 }
4672
4673 mvnw(crc, crc);
4674
4675 {
4676 uint64_t offset;
4677 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4678 add(table0, table0, offset);
4679 }
4680 add(table1, table0, 1*256*sizeof(juint));
4681 add(table2, table0, 2*256*sizeof(juint));
4682 add(table3, table0, 3*256*sizeof(juint));
4683
4684 { // Neon code start
4685 cmp(len, (u1)64);
4686 br(Assembler::LT, L_by16);
4687 eor(v16, T16B, v16, v16);
4688
4689 Label L_fold;
4690
4691 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
4692
4693 ld1(v0, v1, T2D, post(buf, 32));
4694 ld1r(v4, T2D, post(tmp, 8));
4695 ld1r(v5, T2D, post(tmp, 8));
4696 ld1r(v6, T2D, post(tmp, 8));
4697 ld1r(v7, T2D, post(tmp, 8));
4698 mov(v16, S, 0, crc);
4699
4700 eor(v0, T16B, v0, v16);
4701 sub(len, len, 64);
4702
4703 BIND(L_fold);
4704 pmull(v22, T8H, v0, v5, T8B);
4705 pmull(v20, T8H, v0, v7, T8B);
4706 pmull(v23, T8H, v0, v4, T8B);
4707 pmull(v21, T8H, v0, v6, T8B);
4708
4709 pmull2(v18, T8H, v0, v5, T16B);
4710 pmull2(v16, T8H, v0, v7, T16B);
4711 pmull2(v19, T8H, v0, v4, T16B);
4712 pmull2(v17, T8H, v0, v6, T16B);
4713
4714 uzp1(v24, T8H, v20, v22);
4715 uzp2(v25, T8H, v20, v22);
4716 eor(v20, T16B, v24, v25);
4717
4718 uzp1(v26, T8H, v16, v18);
4719 uzp2(v27, T8H, v16, v18);
4720 eor(v16, T16B, v26, v27);
4721
4722 ushll2(v22, T4S, v20, T8H, 8);
4723 ushll(v20, T4S, v20, T4H, 8);
4724
4725 ushll2(v18, T4S, v16, T8H, 8);
4726 ushll(v16, T4S, v16, T4H, 8);
4727
4728 eor(v22, T16B, v23, v22);
4729 eor(v18, T16B, v19, v18);
4730 eor(v20, T16B, v21, v20);
4731 eor(v16, T16B, v17, v16);
4732
4733 uzp1(v17, T2D, v16, v20);
4734 uzp2(v21, T2D, v16, v20);
4735 eor(v17, T16B, v17, v21);
4736
4737 ushll2(v20, T2D, v17, T4S, 16);
4738 ushll(v16, T2D, v17, T2S, 16);
4739
4740 eor(v20, T16B, v20, v22);
4741 eor(v16, T16B, v16, v18);
4742
4743 uzp1(v17, T2D, v20, v16);
4744 uzp2(v21, T2D, v20, v16);
4745 eor(v28, T16B, v17, v21);
4746
4747 pmull(v22, T8H, v1, v5, T8B);
4748 pmull(v20, T8H, v1, v7, T8B);
4749 pmull(v23, T8H, v1, v4, T8B);
4750 pmull(v21, T8H, v1, v6, T8B);
4751
4752 pmull2(v18, T8H, v1, v5, T16B);
4753 pmull2(v16, T8H, v1, v7, T16B);
4754 pmull2(v19, T8H, v1, v4, T16B);
4755 pmull2(v17, T8H, v1, v6, T16B);
4756
4757 ld1(v0, v1, T2D, post(buf, 32));
4758
4759 uzp1(v24, T8H, v20, v22);
4760 uzp2(v25, T8H, v20, v22);
4761 eor(v20, T16B, v24, v25);
4762
4763 uzp1(v26, T8H, v16, v18);
4764 uzp2(v27, T8H, v16, v18);
4765 eor(v16, T16B, v26, v27);
4766
4767 ushll2(v22, T4S, v20, T8H, 8);
4768 ushll(v20, T4S, v20, T4H, 8);
4769
4770 ushll2(v18, T4S, v16, T8H, 8);
4771 ushll(v16, T4S, v16, T4H, 8);
4772
4773 eor(v22, T16B, v23, v22);
4774 eor(v18, T16B, v19, v18);
4775 eor(v20, T16B, v21, v20);
4776 eor(v16, T16B, v17, v16);
4777
4778 uzp1(v17, T2D, v16, v20);
4779 uzp2(v21, T2D, v16, v20);
4780 eor(v16, T16B, v17, v21);
4781
4782 ushll2(v20, T2D, v16, T4S, 16);
4783 ushll(v16, T2D, v16, T2S, 16);
4784
4785 eor(v20, T16B, v22, v20);
4786 eor(v16, T16B, v16, v18);
4787
4788 uzp1(v17, T2D, v20, v16);
4789 uzp2(v21, T2D, v20, v16);
4790 eor(v20, T16B, v17, v21);
4791
4792 shl(v16, T2D, v28, 1);
4793 shl(v17, T2D, v20, 1);
4794
4795 eor(v0, T16B, v0, v16);
4796 eor(v1, T16B, v1, v17);
4797
4798 subs(len, len, 32);
4799 br(Assembler::GE, L_fold);
4800
4801 mov(crc, 0);
4802 mov(tmp, v0, D, 0);
4803 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4804 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4805 mov(tmp, v0, D, 1);
4806 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4807 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4808 mov(tmp, v1, D, 0);
4809 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4810 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4811 mov(tmp, v1, D, 1);
4812 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4813 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4814
4815 add(len, len, 32);
4816 } // Neon code end
4817
4818 BIND(L_by16);
4819 subs(len, len, 16);
4820 br(Assembler::GE, L_by16_loop);
4821 adds(len, len, 16-4);
4822 br(Assembler::GE, L_by4_loop);
4823 adds(len, len, 4);
4824 br(Assembler::GT, L_by1_loop);
4825 b(L_exit);
4826
4827 BIND(L_by4_loop);
4828 ldrw(tmp, Address(post(buf, 4)));
4829 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
4830 subs(len, len, 4);
4831 br(Assembler::GE, L_by4_loop);
4832 adds(len, len, 4);
4833 br(Assembler::LE, L_exit);
4834 BIND(L_by1_loop);
4835 subs(len, len, 1);
4836 ldrb(tmp, Address(post(buf, 1)));
4837 update_byte_crc32(crc, tmp, table0);
4838 br(Assembler::GT, L_by1_loop);
4839 b(L_exit);
4840
4841 align(CodeEntryAlignment);
4842 BIND(L_by16_loop);
4843 subs(len, len, 16);
4844 ldp(tmp, tmp3, Address(post(buf, 16)));
4845 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4846 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4847 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
4848 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
4849 br(Assembler::GE, L_by16_loop);
4850 adds(len, len, 16-4);
4851 br(Assembler::GE, L_by4_loop);
4852 adds(len, len, 4);
4853 br(Assembler::GT, L_by1_loop);
4854 BIND(L_exit);
4855 mvnw(crc, crc);
4856 }
4857
4858 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
4859 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4860 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4861   assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4862
4863 subs(tmp0, len, 384);
4864 br(Assembler::GE, CRC_by128_pre);
4865 BIND(CRC_less128);
4866 subs(len, len, 32);
4867 br(Assembler::GE, CRC_by32_loop);
4868 BIND(CRC_less32);
4869 adds(len, len, 32 - 4);
4870 br(Assembler::GE, CRC_by4_loop);
4871 adds(len, len, 4);
4872 br(Assembler::GT, CRC_by1_loop);
4873 b(L_exit);
4874
4875 BIND(CRC_by32_loop);
4876 ldp(tmp0, tmp1, Address(buf));
4877 crc32cx(crc, crc, tmp0);
4878 ldr(tmp2, Address(buf, 16));
4879 crc32cx(crc, crc, tmp1);
4880 ldr(tmp3, Address(buf, 24));
4881 crc32cx(crc, crc, tmp2);
4882 add(buf, buf, 32);
4883 subs(len, len, 32);
4884 crc32cx(crc, crc, tmp3);
4885 br(Assembler::GE, CRC_by32_loop);
4886 cmn(len, (u1)32);
4887 br(Assembler::NE, CRC_less32);
4888 b(L_exit);
4889
4890 BIND(CRC_by4_loop);
4891 ldrw(tmp0, Address(post(buf, 4)));
4892 subs(len, len, 4);
4893 crc32cw(crc, crc, tmp0);
4894 br(Assembler::GE, CRC_by4_loop);
4895 adds(len, len, 4);
4896 br(Assembler::LE, L_exit);
4897 BIND(CRC_by1_loop);
4898 ldrb(tmp0, Address(post(buf, 1)));
4899 subs(len, len, 1);
4900 crc32cb(crc, crc, tmp0);
4901 br(Assembler::GT, CRC_by1_loop);
4902 b(L_exit);
4903
4904 BIND(CRC_by128_pre);
4905 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4906 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
4907 mov(crc, 0);
4908 crc32cx(crc, crc, tmp0);
4909 crc32cx(crc, crc, tmp1);
4910
4911 cbnz(len, CRC_less128);
4912
4913 BIND(L_exit);
4914 }
4915
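// CRC32C computation using only the scalar crc32c instructions. The main
// loop consumes 64 bytes per iteration, software-pipelining the loads
// against the crc32cx instructions; shorter tails fall through to 32-, 4-
// and 1-byte loops.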
4916 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
4917 Register len, Register tmp0, Register tmp1, Register tmp2,
4918 Register tmp3) {
4919 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4920 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4921
4922 subs(len, len, 128);
4923 br(Assembler::GE, CRC_by64_pre);
4924 BIND(CRC_less64);
4925 adds(len, len, 128-32);
4926 br(Assembler::GE, CRC_by32_loop);
4927 BIND(CRC_less32);
4928 adds(len, len, 32-4);
4929 br(Assembler::GE, CRC_by4_loop);
4930 adds(len, len, 4);
4931 br(Assembler::GT, CRC_by1_loop);
4932 b(L_exit);
4933
4934 BIND(CRC_by32_loop);
4935 ldp(tmp0, tmp1, Address(post(buf, 16)));
4936 subs(len, len, 32);
4937 crc32cx(crc, crc, tmp0);
4938 ldr(tmp2, Address(post(buf, 8)));
4939 crc32cx(crc, crc, tmp1);
4940 ldr(tmp3, Address(post(buf, 8)));
4941 crc32cx(crc, crc, tmp2);
4942 crc32cx(crc, crc, tmp3);
4943 br(Assembler::GE, CRC_by32_loop);
4944 cmn(len, (u1)32);
4945 br(Assembler::NE, CRC_less32);
4946 b(L_exit);
4947
4948 BIND(CRC_by4_loop);
4949 ldrw(tmp0, Address(post(buf, 4)));
4950 subs(len, len, 4);
4951 crc32cw(crc, crc, tmp0);
4952 br(Assembler::GE, CRC_by4_loop);
4953 adds(len, len, 4);
4954 br(Assembler::LE, L_exit);
4955 BIND(CRC_by1_loop);
4956 ldrb(tmp0, Address(post(buf, 1)));
4957 subs(len, len, 1);
4958 crc32cb(crc, crc, tmp0);
4959 br(Assembler::GT, CRC_by1_loop);
4960 b(L_exit);
4961
4962 BIND(CRC_by64_pre);
4963 sub(buf, buf, 8);
4964 ldp(tmp0, tmp1, Address(buf, 8));
4965 crc32cx(crc, crc, tmp0);
4966 ldr(tmp2, Address(buf, 24));
4967 crc32cx(crc, crc, tmp1);
4968 ldr(tmp3, Address(buf, 32));
4969 crc32cx(crc, crc, tmp2);
4970 ldr(tmp0, Address(buf, 40));
4971 crc32cx(crc, crc, tmp3);
4972 ldr(tmp1, Address(buf, 48));
4973 crc32cx(crc, crc, tmp0);
4974 ldr(tmp2, Address(buf, 56));
4975 crc32cx(crc, crc, tmp1);
4976 ldr(tmp3, Address(pre(buf, 64)));
4977
4978 b(CRC_by64_loop);
4979
4980 align(CodeEntryAlignment);
4981 BIND(CRC_by64_loop);
4982 subs(len, len, 64);
4983 crc32cx(crc, crc, tmp2);
4984 ldr(tmp0, Address(buf, 8));
4985 crc32cx(crc, crc, tmp3);
4986 ldr(tmp1, Address(buf, 16));
4987 crc32cx(crc, crc, tmp0);
4988 ldr(tmp2, Address(buf, 24));
4989 crc32cx(crc, crc, tmp1);
4990 ldr(tmp3, Address(buf, 32));
4991 crc32cx(crc, crc, tmp2);
4992 ldr(tmp0, Address(buf, 40));
4993 crc32cx(crc, crc, tmp3);
4994 ldr(tmp1, Address(buf, 48));
4995 crc32cx(crc, crc, tmp0);
4996 ldr(tmp2, Address(buf, 56));
4997 crc32cx(crc, crc, tmp1);
4998 ldr(tmp3, Address(pre(buf, 64)));
4999 br(Assembler::GE, CRC_by64_loop);
5000
5001 // post-loop
5002 crc32cx(crc, crc, tmp2);
5003 crc32cx(crc, crc, tmp3);
5004
5005 sub(len, len, 64);
5006 add(buf, buf, 8);
5007 cmn(len, (u1)128);
5008 br(Assembler::NE, CRC_less64);
5009 BIND(L_exit);
5010 }
5011
5012 /**
5013  * @param crc   register containing existing CRC (32-bit)
5014  * @param buf   register pointing to input byte buffer (byte*)
5015  * @param len   register containing number of bytes
5016  * @param table0..table3  scratch registers passed through to the selected kernel
5017  * @param tmp, tmp2, tmp3  additional scratch registers (unused in this variant)
5018  */
5019 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
5020 Register table0, Register table1, Register table2, Register table3,
5021 Register tmp, Register tmp2, Register tmp3) {
5022 if (UseCryptoPmullForCRC32) {
5023 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
5024 } else {
5025 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
5026 }
5027 }
5028
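// Shared 128-byte folding loop for the pmull-based CRC32/CRC32C kernels.
// Folding uses the standard CRC identity over GF(2): a carry-less multiply
// of a chunk M(x) by the precomputed constant (x^k mod P(x)) is congruent
// to shifting M(x) k bits through the CRC, so the eight 128-bit lanes in
// v0..v7 can all be advanced in parallel. The constants are read from the
// crc table at the given table_offset. On return, buf points at the
// unprocessed tail, len holds the remaining byte count, and the folded
// 128-bit remainder is left in tmp0 (low half) and tmp1 (high half).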
5029 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
5030 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
5031 Label CRC_by128_loop;
5032 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
5033
5034 sub(len, len, 256);
5035 Register table = tmp0;
5036 {
5037 uint64_t offset;
5038 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
5039 add(table, table, offset);
5040 }
5041 add(table, table, table_offset);
5042
5043 // Registers v0..v7 are used as data registers.
5044 // Registers v16..v31 are used as tmp registers.
5045 sub(buf, buf, 0x10);
5046 ldrq(v0, Address(buf, 0x10));
5047 ldrq(v1, Address(buf, 0x20));
5048 ldrq(v2, Address(buf, 0x30));
5049 ldrq(v3, Address(buf, 0x40));
5050 ldrq(v4, Address(buf, 0x50));
5051 ldrq(v5, Address(buf, 0x60));
5052 ldrq(v6, Address(buf, 0x70));
5053 ldrq(v7, Address(pre(buf, 0x80)));
5054
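  // XOR the incoming CRC value into the low 32 bits of the first data register.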
5055 movi(v31, T4S, 0);
5056 mov(v31, S, 0, crc);
5057 eor(v0, T16B, v0, v31);
5058
5059 // Register v16 contains constants from the crc table.
5060 ldrq(v16, Address(table));
5061 b(CRC_by128_loop);
5062
5063 align(OptoLoopAlignment);
5064 BIND(CRC_by128_loop);
5065 pmull (v17, T1Q, v0, v16, T1D);
5066 pmull2(v18, T1Q, v0, v16, T2D);
5067 ldrq(v0, Address(buf, 0x10));
5068 eor3(v0, T16B, v17, v18, v0);
5069
5070 pmull (v19, T1Q, v1, v16, T1D);
5071 pmull2(v20, T1Q, v1, v16, T2D);
5072 ldrq(v1, Address(buf, 0x20));
5073 eor3(v1, T16B, v19, v20, v1);
5074
5075 pmull (v21, T1Q, v2, v16, T1D);
5076 pmull2(v22, T1Q, v2, v16, T2D);
5077 ldrq(v2, Address(buf, 0x30));
5078 eor3(v2, T16B, v21, v22, v2);
5079
5080 pmull (v23, T1Q, v3, v16, T1D);
5081 pmull2(v24, T1Q, v3, v16, T2D);
5082 ldrq(v3, Address(buf, 0x40));
5083 eor3(v3, T16B, v23, v24, v3);
5084
5085 pmull (v25, T1Q, v4, v16, T1D);
5086 pmull2(v26, T1Q, v4, v16, T2D);
5087 ldrq(v4, Address(buf, 0x50));
5088 eor3(v4, T16B, v25, v26, v4);
5089
5090 pmull (v27, T1Q, v5, v16, T1D);
5091 pmull2(v28, T1Q, v5, v16, T2D);
5092 ldrq(v5, Address(buf, 0x60));
5093 eor3(v5, T16B, v27, v28, v5);
5094
5095 pmull (v29, T1Q, v6, v16, T1D);
5096 pmull2(v30, T1Q, v6, v16, T2D);
5097 ldrq(v6, Address(buf, 0x70));
5098 eor3(v6, T16B, v29, v30, v6);
5099
5100 // Reuse registers v23, v24.
5101 // Using them won't block the first instruction of the next iteration.
5102 pmull (v23, T1Q, v7, v16, T1D);
5103 pmull2(v24, T1Q, v7, v16, T2D);
5104 ldrq(v7, Address(pre(buf, 0x80)));
5105 eor3(v7, T16B, v23, v24, v7);
5106
5107 subs(len, len, 0x80);
5108 br(Assembler::GE, CRC_by128_loop);
5109
5110 // fold into 512 bits
5111   // Use v31 for constants because v16 may still be in use.
5112 ldrq(v31, Address(table, 0x10));
5113
5114 pmull (v17, T1Q, v0, v31, T1D);
5115 pmull2(v18, T1Q, v0, v31, T2D);
5116 eor3(v0, T16B, v17, v18, v4);
5117
5118 pmull (v19, T1Q, v1, v31, T1D);
5119 pmull2(v20, T1Q, v1, v31, T2D);
5120 eor3(v1, T16B, v19, v20, v5);
5121
5122 pmull (v21, T1Q, v2, v31, T1D);
5123 pmull2(v22, T1Q, v2, v31, T2D);
5124 eor3(v2, T16B, v21, v22, v6);
5125
5126 pmull (v23, T1Q, v3, v31, T1D);
5127 pmull2(v24, T1Q, v3, v31, T2D);
5128 eor3(v3, T16B, v23, v24, v7);
5129
5130 // fold into 128 bits
5131   // Use v17 for constants because v31 may still be in use.
5132 ldrq(v17, Address(table, 0x20));
5133 pmull (v25, T1Q, v0, v17, T1D);
5134 pmull2(v26, T1Q, v0, v17, T2D);
5135 eor3(v3, T16B, v3, v25, v26);
5136
5137   // Use v18 for constants because v17 may still be in use.
5138 ldrq(v18, Address(table, 0x30));
5139 pmull (v27, T1Q, v1, v18, T1D);
5140 pmull2(v28, T1Q, v1, v18, T2D);
5141 eor3(v3, T16B, v3, v27, v28);
5142
5143   // Use v19 for constants because v18 may still be in use.
5144 ldrq(v19, Address(table, 0x40));
5145 pmull (v29, T1Q, v2, v19, T1D);
5146 pmull2(v30, T1Q, v2, v19, T2D);
5147 eor3(v0, T16B, v3, v29, v30);
5148
5149 add(len, len, 0x80);
5150 add(buf, buf, 0x10);
5151
5152 mov(tmp0, v0, D, 0);
5153 mov(tmp1, v0, D, 1);
5154 }
5155
5156 void MacroAssembler::addptr(const Address &dst, int32_t src) {
5157 Address adr;
5158 switch(dst.getMode()) {
5159 case Address::base_plus_offset:
5160 // This is the expected mode, although we allow all the other
5161 // forms below.
5162 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
5163 break;
5164 default:
5165 lea(rscratch2, dst);
5166 adr = Address(rscratch2);
5167 break;
5168 }
5169 ldr(rscratch1, adr);
5170 add(rscratch1, rscratch1, src);
5171 str(rscratch1, adr);
5172 }
5173
5174 void MacroAssembler::cmpptr(Register src1, Address src2) {
5175 uint64_t offset;
5176 adrp(rscratch1, src2, offset);
5177 ldr(rscratch1, Address(rscratch1, offset));
5178 cmp(src1, rscratch1);
5179 }
5180
5181 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
5182 cmp(obj1, obj2);
5183 }
5184
5185 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5186 load_method_holder(rresult, rmethod);
5187 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5188 }
5189
5190 void MacroAssembler::load_method_holder(Register holder, Register method) {
5191 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
5192 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5193 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5194 }
5195
5196 void MacroAssembler::load_metadata(Register dst, Register src) {
5197 if (UseCompactObjectHeaders) {
5198 load_narrow_klass_compact(dst, src);
5199 } else {
5200 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5201 }
5202 }
5203
5204 // Loads the obj's narrow klass (from the compact mark word) into dst.
5205 // Preserves all registers except dst (incl. src, rscratch1 and rscratch2).
5206 // Input:
5207 //  src - the oop we want to load the klass from.
5208 //  dst - output narrow klass.
5209 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5210 assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
5211 ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5212 lsr(dst, dst, markWord::klass_shift);
5213 }
5214
5215 void MacroAssembler::load_klass(Register dst, Register src) {
5216 if (UseCompactObjectHeaders) {
5217 load_narrow_klass_compact(dst, src);
5218 } else {
5219 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5220 }
5221 decode_klass_not_null(dst);
5222 }
5223
5224 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
5225 if (RestoreMXCSROnJNICalls) {
5226 Label OK;
5227 get_fpcr(tmp1);
5228 mov(tmp2, tmp1);
5229 // Set FPCR to the state we need. We do want Round to Nearest. We
5230 // don't want non-IEEE rounding modes or floating-point traps.
5231 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
5232 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12)
5233 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ
5234 eor(tmp2, tmp1, tmp2);
5235 cbz(tmp2, OK); // Only reset FPCR if it's wrong
5236 set_fpcr(tmp1);
5237 bind(OK);
5238 }
5239 }
5240
5241 // ((OopHandle)result).resolve();
5242 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
5243 // OopHandle::resolve is an indirection.
5244 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
5245 }
5246
5247 // ((WeakHandle)result).resolve();
5248 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
5249 assert_different_registers(result, tmp1, tmp2);
5250 Label resolved;
5251
5252 // A null weak handle resolves to null.
5253 cbz(result, resolved);
5254
5255   // Only 64-bit platforms support GCs that require a tmp register.
5256   // WeakHandle::resolve is an indirection like jweak.
5257 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5258 result, Address(result), tmp1, tmp2);
5259 bind(resolved);
5260 }
5261
5262 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
5263 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5264   ldr(dst, Address(method, Method::const_offset()));
5265 ldr(dst, Address(dst, ConstMethod::constants_offset()));
5266 ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
5267 ldr(dst, Address(dst, mirror_offset));
5268 resolve_oop_handle(dst, tmp1, tmp2);
5269 }
5270
5271 void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
5272 assert_different_registers(obj, klass, tmp);
5273 if (UseCompactObjectHeaders) {
5274 load_narrow_klass_compact(tmp, obj);
5275 } else {
5276 ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
5277 }
5278 if (CompressedKlassPointers::base() == nullptr) {
5279 cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
5280 return;
5281 } else if (!AOTCodeCache::is_on_for_dump() &&
5282 ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
5283 && CompressedKlassPointers::shift() == 0) {
5284 // Only the bottom 32 bits matter
5285 cmpw(klass, tmp);
5286 return;
5287 }
5288 decode_klass_not_null(tmp);
5289 cmp(klass, tmp);
5290 }
5291
5292 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5293 if (UseCompactObjectHeaders) {
5294 load_narrow_klass_compact(tmp1, obj1);
5295 load_narrow_klass_compact(tmp2, obj2);
5296 } else {
5297 ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5298 ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5299 }
5300 cmpw(tmp1, tmp2);
5301 }
5302
5303 void MacroAssembler::load_prototype_header(Register dst, Register src) {
5304 load_klass(dst, src);
5305 ldr(dst, Address(dst, Klass::prototype_header_offset()));
5306 }
5307
5308 void MacroAssembler::store_klass(Register dst, Register src) {
5309   // FIXME: Should this be a store release? Concurrent GCs assume the
5310   // length is valid as soon as the klass field is not null.
5311 assert(!UseCompactObjectHeaders, "not with compact headers");
5312 encode_klass_not_null(src);
5313 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5314 }
5315
5316 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5317 assert(!UseCompactObjectHeaders, "not with compact headers");
5318 // Store to klass gap in destination
5319 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
5320 }
5321
5322 // Algorithm must match CompressedOops::encode.
5323 void MacroAssembler::encode_heap_oop(Register d, Register s) {
5324 #ifdef ASSERT
5325 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5326 #endif
5327 verify_oop_msg(s, "broken oop in encode_heap_oop");
5328 if (CompressedOops::base() == nullptr) {
5329 if (CompressedOops::shift() != 0) {
5330 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5331 lsr(d, s, LogMinObjAlignmentInBytes);
5332 } else {
5333 mov(d, s);
5334 }
5335 } else {
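    // A null oop sits below the heap base, so the subtraction is not HS for
    // it and the csel substitutes zero; any real oop keeps its offset from
    // the heap base, which is then shifted down.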
5336 subs(d, s, rheapbase);
5337 csel(d, d, zr, Assembler::HS);
5338 lsr(d, d, LogMinObjAlignmentInBytes);
5339
5340 /* Old algorithm: is this any worse?
5341 Label nonnull;
5342 cbnz(r, nonnull);
5343 sub(r, r, rheapbase);
5344 bind(nonnull);
5345 lsr(r, r, LogMinObjAlignmentInBytes);
5346 */
5347 }
5348 }
5349
5350 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5351 #ifdef ASSERT
5352 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5353 if (CheckCompressedOops) {
5354 Label ok;
5355 cbnz(r, ok);
5356 stop("null oop passed to encode_heap_oop_not_null");
5357 bind(ok);
5358 }
5359 #endif
5360 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5361 if (CompressedOops::base() != nullptr) {
5362 sub(r, r, rheapbase);
5363 }
5364 if (CompressedOops::shift() != 0) {
5365 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5366 lsr(r, r, LogMinObjAlignmentInBytes);
5367 }
5368 }
5369
5370 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5371 #ifdef ASSERT
5372 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5373 if (CheckCompressedOops) {
5374 Label ok;
5375 cbnz(src, ok);
5376 stop("null oop passed to encode_heap_oop_not_null2");
5377 bind(ok);
5378 }
5379 #endif
5380 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5381
5382 Register data = src;
5383 if (CompressedOops::base() != nullptr) {
5384 sub(dst, src, rheapbase);
5385 data = dst;
5386 }
5387 if (CompressedOops::shift() != 0) {
5388 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5389 lsr(dst, data, LogMinObjAlignmentInBytes);
5390 data = dst;
5391 }
5392 if (data == src)
5393 mov(dst, src);
5394 }
5395
5396 void MacroAssembler::decode_heap_oop(Register d, Register s) {
5397 #ifdef ASSERT
5398 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5399 #endif
5400 if (CompressedOops::base() == nullptr) {
5401 if (CompressedOops::shift() != 0) {
5402 lsl(d, s, CompressedOops::shift());
5403 } else if (d != s) {
5404 mov(d, s);
5405 }
5406 } else {
5407 Label done;
5408 if (d != s)
5409 mov(d, s);
5410 cbz(s, done);
5411 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
5412 bind(done);
5413 }
5414 verify_oop_msg(d, "broken oop in decode_heap_oop");
5415 }
5416
5417 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5418 assert (UseCompressedOops, "should only be used for compressed headers");
5419 assert (Universe::heap() != nullptr, "java heap should be initialized");
5420 // Cannot assert, unverified entry point counts instructions (see .ad file)
5421 // vtableStubs also counts instructions in pd_code_size_limit.
5422 // Also do not verify_oop as this is called by verify_oop.
5423 if (CompressedOops::shift() != 0) {
5424 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5425 if (CompressedOops::base() != nullptr) {
5426 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5427 } else {
5428 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5429 }
5430 } else {
5431 assert (CompressedOops::base() == nullptr, "sanity");
5432 }
5433 }
5434
5435 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5436 assert (UseCompressedOops, "should only be used for compressed headers");
5437 assert (Universe::heap() != nullptr, "java heap should be initialized");
5438 // Cannot assert, unverified entry point counts instructions (see .ad file)
5439 // vtableStubs also counts instructions in pd_code_size_limit.
5440 // Also do not verify_oop as this is called by verify_oop.
5441 if (CompressedOops::shift() != 0) {
5442 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5443 if (CompressedOops::base() != nullptr) {
5444 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5445 } else {
5446 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5447 }
5448 } else {
5449 assert (CompressedOops::base() == nullptr, "sanity");
5450 if (dst != src) {
5451 mov(dst, src);
5452 }
5453 }
5454 }
5455
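// Narrow-klass decode strategies, selected from the compressed klass base
// and shift (a summary of the checks in klass_decode_mode below):
//   KlassDecodeZero - base == nullptr: a shift (or plain move) suffices.
//   KlassDecodeXor  - base is a valid logical immediate with no bits inside
//                     the encoding range, so an eor merges base and offset.
//   KlassDecodeMovk - base >> shift has bits only in [32, 48), so a single
//                     movk can merge the base into the decoded value.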
5456 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
5457
5458 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
5459 assert(Metaspace::initialized(), "metaspace not initialized yet");
5460 assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
5461 return _klass_decode_mode;
5462 }
5463
5464 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
5465 // KlassDecodeMode shouldn't be set already.
5466 assert(_klass_decode_mode == KlassDecodeNone, "set once");
5467
5468 if (base == nullptr) {
5469 return KlassDecodeZero;
5470 }
5471
5472 if (operand_valid_for_logical_immediate(
5473 /*is32*/false, (uint64_t)base)) {
5474 const uint64_t range_mask = right_n_bits(log2i_ceil(range));
5475 if (((uint64_t)base & range_mask) == 0) {
5476 return KlassDecodeXor;
5477 }
5478 }
5479
5480 const uint64_t shifted_base =
5481 (uint64_t)base >> shift;
5482 if ((shifted_base & 0xffff0000ffffffff) == 0) {
5483 return KlassDecodeMovk;
5484 }
5485
5486 // No valid encoding.
5487 return KlassDecodeNone;
5488 }
5489
5490 // Check if one of the above decoding modes will work for given base, shift and range.
5491 bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
5492 return klass_decode_mode(base, shift, range) != KlassDecodeNone;
5493 }
5494
5495 bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
5496 _klass_decode_mode = klass_decode_mode(base, shift, range);
5497 return _klass_decode_mode != KlassDecodeNone;
5498 }
5499
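// Returns a register from {r0, r1, r2} that differs from both dst and src;
// since at most two of the three can collide, one always remains.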
5500 static Register pick_different_tmp(Register dst, Register src) {
5501 auto tmps = RegSet::of(r0, r1, r2) - RegSet::of(src, dst);
5502 return *tmps.begin();
5503 }
5504
5505 void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) {
5506   // We have to load the klass base from the AOT constants area, but not
5507   // the shift, because the shift is not allowed to change.
5508 int shift = CompressedKlassPointers::shift();
5509 assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
5510 if (dst != src) {
5511     // we can load the base into dst, subtract it from the src and shift down
5512 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5513 ldr(dst, dst);
5514 sub(dst, src, dst);
5515 lsr(dst, dst, shift);
5516 } else {
5517     // we need an extra register in order to load the klass base
5518 Register tmp = pick_different_tmp(dst, src);
5519 RegSet regs = RegSet::of(tmp);
5520 push(regs, sp);
5521 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5522 ldr(tmp, tmp);
5523 sub(dst, src, tmp);
5524 lsr(dst, dst, shift);
5525 pop(regs, sp);
5526 }
5527 }
5528
5529 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5530 if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
5531 encode_klass_not_null_for_aot(dst, src);
5532 return;
5533 }
5534
5535 switch (klass_decode_mode()) {
5536 case KlassDecodeZero:
5537 if (CompressedKlassPointers::shift() != 0) {
5538 lsr(dst, src, CompressedKlassPointers::shift());
5539 } else {
5540 if (dst != src) mov(dst, src);
5541 }
5542 break;
5543
5544 case KlassDecodeXor:
5545 if (CompressedKlassPointers::shift() != 0) {
5546 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5547 lsr(dst, dst, CompressedKlassPointers::shift());
5548 } else {
5549 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5550 }
5551 break;
5552
5553 case KlassDecodeMovk:
5554 if (CompressedKlassPointers::shift() != 0) {
5555 ubfx(dst, src, CompressedKlassPointers::shift(), 32);
5556 } else {
5557 movw(dst, src);
5558 }
5559 break;
5560
5561 case KlassDecodeNone:
5562 ShouldNotReachHere();
5563 break;
5564 }
5565 }
5566
5567 void MacroAssembler::encode_klass_not_null(Register r) {
5568 encode_klass_not_null(r, r);
5569 }
5570
5571 void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) {
5572   // We have to load the klass base from the AOT constants area, but not
5573   // the shift, because the shift is not allowed to change.
5574 int shift = CompressedKlassPointers::shift();
5575 assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!");
5576 if (dst != src) {
5577 // we can load the base into dst then add the offset with a suitable shift
5578 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5579 ldr(dst, dst);
5580 add(dst, dst, src, LSL, shift);
5581 } else {
5582     // we need an extra register in order to load the klass base
5583 Register tmp = pick_different_tmp(dst, src);
5584 RegSet regs = RegSet::of(tmp);
5585 push(regs, sp);
5586 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5587 ldr(tmp, tmp);
5588 add(dst, tmp, src, LSL, shift);
5589 pop(regs, sp);
5590 }
5591 }
5592
5593 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5594 if (AOTCodeCache::is_on_for_dump()) {
5595 decode_klass_not_null_for_aot(dst, src);
5596 return;
5597 }
5598
5599 switch (klass_decode_mode()) {
5600 case KlassDecodeZero:
5601 if (CompressedKlassPointers::shift() != 0) {
5602 lsl(dst, src, CompressedKlassPointers::shift());
5603 } else {
5604 if (dst != src) mov(dst, src);
5605 }
5606 break;
5607
5608 case KlassDecodeXor:
5609 if (CompressedKlassPointers::shift() != 0) {
5610 lsl(dst, src, CompressedKlassPointers::shift());
5611 eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5612 } else {
5613 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5614 }
5615 break;
5616
5617 case KlassDecodeMovk: {
5618 const uint64_t shifted_base =
5619 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
5620
5621 if (dst != src) movw(dst, src);
5622 movk(dst, shifted_base >> 32, 32);
5623
5624 if (CompressedKlassPointers::shift() != 0) {
5625 lsl(dst, dst, CompressedKlassPointers::shift());
5626 }
5627
5628 break;
5629 }
5630
5631 case KlassDecodeNone:
5632 ShouldNotReachHere();
5633 break;
5634 }
5635 }
5636
5637 void MacroAssembler::decode_klass_not_null(Register r) {
5638 decode_klass_not_null(r, r);
5639 }
5640
5641 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5642 #ifdef ASSERT
5643 {
5644 ThreadInVMfromUnknown tiv;
5645 assert (UseCompressedOops, "should only be used for compressed oops");
5646 assert (Universe::heap() != nullptr, "java heap should be initialized");
5647 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5648 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5649 }
5650 #endif
5651 int oop_index = oop_recorder()->find_index(obj);
5652 InstructionMark im(this);
5653 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5654 code_section()->relocate(inst_mark(), rspec);
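  // Emit the placeholder 0xDEADBEEF; the oop relocation recorded above
  // allows the real narrow oop value to be patched in later.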
5655 movz(dst, 0xDEAD, 16);
5656 movk(dst, 0xBEEF);
5657 }
5658
5659 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5660 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5661 int index = oop_recorder()->find_index(k);
5662
5663 InstructionMark im(this);
5664 RelocationHolder rspec = metadata_Relocation::spec(index);
5665 code_section()->relocate(inst_mark(), rspec);
5666 narrowKlass nk = CompressedKlassPointers::encode(k);
5667 movz(dst, (nk >> 16), 16);
5668 movk(dst, nk & 0xffff);
5669 }
5670
5671 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
5672 Register dst, Address src,
5673 Register tmp1, Register tmp2) {
5674 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5675 decorators = AccessInternal::decorator_fixup(decorators, type);
5676 bool as_raw = (decorators & AS_RAW) != 0;
5677 if (as_raw) {
5678 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
5679 } else {
5680 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
5681 }
5682 }
5683
5684 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
5685 Address dst, Register val,
5686 Register tmp1, Register tmp2, Register tmp3) {
5687 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5688 decorators = AccessInternal::decorator_fixup(decorators, type);
5689 bool as_raw = (decorators & AS_RAW) != 0;
5690 if (as_raw) {
5691 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5692 } else {
5693 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5694 }
5695 }
5696
5697 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5698 Register inline_layout_info) {
5699 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5700 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5701 }
5702
5703 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5704 ldr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
5705 ldrw(offset, Address(offset, InlineKlass::payload_offset_offset()));
5706 }
5707
5708 void MacroAssembler::payload_address(Register oop, Register data, Register inline_klass) {
5709 // ((address) (void*) o) + vk->payload_offset();
5710 Register offset = (data == oop) ? rscratch1 : data;
5711 payload_offset(inline_klass, offset);
5712 if (data == oop) {
5713 add(data, data, offset);
5714 } else {
5715 lea(data, Address(oop, offset));
5716 }
5717 }
5718
5719 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5720 Register index, Register data) {
5721 assert_different_registers(array, array_klass, index);
5722 assert_different_registers(rscratch1, array, index);
5723
5724 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5725 ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset()));
5726
5727 // Klass::layout_helper_log2_element_size(lh)
5728 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5729 lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift);
5730 andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask);
5731 lslv(index, index, rscratch1);
5732
5733 add(data, array, index);
5734 add(data, data, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT));
5735 }
5736
5737 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5738 Register tmp2, DecoratorSet decorators) {
5739 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
5740 }
5741
5742 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5743 Register tmp2, DecoratorSet decorators) {
5744 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
5745 }
5746
5747 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5748 Register tmp2, Register tmp3, DecoratorSet decorators) {
5749 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5750 }
5751
5752 // Used for storing nulls.
5753 void MacroAssembler::store_heap_oop_null(Address dst) {
5754 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5755 }
5756
5757 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
5758 assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
5759 int index = oop_recorder()->allocate_metadata_index(obj);
5760 RelocationHolder rspec = metadata_Relocation::spec(index);
5761 return Address((address)obj, rspec);
5762 }
5763
5764 // Move an oop into a register.
5765 void MacroAssembler::movoop(Register dst, jobject obj) {
5766 int oop_index;
5767 if (obj == nullptr) {
5768 oop_index = oop_recorder()->allocate_oop_index(obj);
5769 } else {
5770 #ifdef ASSERT
5771 {
5772 ThreadInVMfromUnknown tiv;
5773 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5774 }
5775 #endif
5776 oop_index = oop_recorder()->find_index(obj);
5777 }
5778 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5779
5780 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
5781 mov(dst, Address((address)obj, rspec));
5782 } else {
5783 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
5784 ldr(dst, Address(dummy, rspec));
5785 }
5786 }
5787
5788 // Move a metadata address into a register.
5789 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
5790 int oop_index;
5791 if (obj == nullptr) {
5792 oop_index = oop_recorder()->allocate_metadata_index(obj);
5793 } else {
5794 oop_index = oop_recorder()->find_index(obj);
5795 }
5796 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
5797 mov(dst, Address((address)obj, rspec));
5798 }
5799
5800 Address MacroAssembler::constant_oop_address(jobject obj) {
5801 #ifdef ASSERT
5802 {
5803 ThreadInVMfromUnknown tiv;
5804 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5805 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
5806 }
5807 #endif
5808 int oop_index = oop_recorder()->find_index(obj);
5809 return Address((address)obj, oop_Relocation::spec(oop_index));
5810 }
5811
5812 // Object / value buffer allocation...
5813 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
5814 Register t1, Register t2,
5815 bool clear_fields, Label& alloc_failed)
5816 {
5817 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
5818 Register layout_size = t1;
5819 assert(new_obj == r0, "needs to be r0");
5820 assert_different_registers(klass, new_obj, t1, t2);
5821
5822 // get instance_size in InstanceKlass (scaled to a count of bytes)
5823 ldrw(layout_size, Address(klass, Klass::layout_helper_offset()));
5824 // test to see if it is malformed in some way
5825 tst(layout_size, Klass::_lh_instance_slow_path_bit);
5826 br(Assembler::NE, slow_case_no_pop);
5827
5828 // Allocate the instance:
5829 // If TLAB is enabled:
5830 // Try to allocate in the TLAB.
5831 // If fails, go to the slow path.
5832 // Initialize the allocation.
5833 // Exit.
5834 //
5835 // Go to slow path.
5836
5837 if (UseTLAB) {
5838 push(klass);
5839 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
5840 if (ZeroTLAB || (!clear_fields)) {
5841       // the fields have already been cleared
5842 b(initialize_header);
5843 } else {
5844 // initialize both the header and fields
5845 b(initialize_object);
5846 }
5847
5848 if (clear_fields) {
5849 // The object is initialized before the header. If the object size is
5850 // zero, go directly to the header initialization.
5851 bind(initialize_object);
5852 int header_size = oopDesc::header_size() * HeapWordSize;
5853 assert(is_aligned(header_size, BytesPerLong), "oop header size must be 8-byte-aligned");
5854 subs(layout_size, layout_size, header_size);
5855 br(Assembler::EQ, initialize_header);
5856
5857 // Initialize topmost object field, divide size by 8, check if odd and
5858 // test if zero.
5859
5860 #ifdef ASSERT
5861       // make sure instance_size was a multiple of 8
5862 Label L;
5863 tst(layout_size, 7);
5864 br(Assembler::EQ, L);
5865 stop("object size is not multiple of 8 - adjust this code");
5866 bind(L);
5867 // must be > 0, no extra check needed here
5868 #endif
5869
5870 lsr(layout_size, layout_size, LogBytesPerLong);
5871
5872 // initialize remaining object fields: instance_size was a multiple of 8
5873 {
5874 Label loop;
5875 Register base = t2;
5876
5877 bind(loop);
5878 add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong);
5879 str(zr, Address(rscratch1, header_size - 1*oopSize));
5880 subs(layout_size, layout_size, 1);
5881 br(Assembler::NE, loop);
5882 }
5883 } // clear_fields
5884
5885 // initialize object header only.
5886 bind(initialize_header);
5887 pop(klass);
5888 Register mark_word = t2;
5889 if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
5890 ldr(mark_word, Address(klass, Klass::prototype_header_offset()));
5891 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5892 } else {
5893 mov(mark_word, (intptr_t)markWord::prototype().value());
5894 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5895 }
5896 if (!UseCompactObjectHeaders) {
5897 store_klass_gap(new_obj, zr); // zero klass gap for compressed oops
5898 mov(t2, klass); // preserve klass
5899 store_klass(new_obj, t2); // src klass reg is potentially compressed
5900 }
5901 b(done);
5902 }
5903
5904 if (UseTLAB) {
5905 bind(slow_case);
5906 pop(klass);
5907 }
5908 bind(slow_case_no_pop);
5909 b(alloc_failed);
5910
5911 bind(done);
5912 }
5913
5914 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5915 void MacroAssembler::tlab_allocate(Register obj,
5916 Register var_size_in_bytes,
5917 int con_size_in_bytes,
5918 Register t1,
5919 Register t2,
5920 Label& slow_case) {
5921 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5922 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5923 }
5924
5925 void MacroAssembler::verify_tlab() {
5926 #ifdef ASSERT
5927 if (UseTLAB && VerifyOops) {
5928 Label next, ok;
5929
5930 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5931
5932 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5933 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5934 cmp(rscratch2, rscratch1);
5935 br(Assembler::HS, next);
5936 STOP("assert(top >= start)");
5937 should_not_reach_here();
5938
5939 bind(next);
5940 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5941 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5942 cmp(rscratch2, rscratch1);
5943 br(Assembler::HS, ok);
5944 STOP("assert(top <= end)");
5945 should_not_reach_here();
5946
5947 bind(ok);
5948 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5949 }
5950 #endif
5951 }
5952
5953 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
5954 assert_different_registers(holder_klass, index, layout_info);
5955 InlineLayoutInfo array[2];
5956   int size = (char*)&array[1] - (char*)&array[0]; // size of one array element
5957 if (is_power_of_2(size)) {
5958 lsl(index, index, log2i_exact(size)); // Scale index by power of 2
5959 } else {
5960 mov(layout_info, size);
5961 mul(index, index, layout_info); // Scale the index to be the entry index * array_element_size
5962 }
5963 ldr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
5964 add(layout_info, layout_info, Array<InlineLayoutInfo>::base_offset_in_bytes());
5965 lea(layout_info, Address(layout_info, index));
5966 }
5967
5968 // Writes to successive stack pages until the given offset is reached, to
5969 // check for stack overflow + shadow pages. This clobbers tmp.
5970 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5971 assert_different_registers(tmp, size, rscratch1);
5972 mov(tmp, sp);
5973 // Bang stack for total size given plus shadow page size.
5974   // Bang one page at a time because a large size can bang beyond the
5975   // yellow and red zones.
5976 Label loop;
5977 mov(rscratch1, (int)os::vm_page_size());
5978 bind(loop);
5979 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5980 subsw(size, size, rscratch1);
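  // The value stored does not matter; the store itself probes the page.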
5981 str(size, Address(tmp));
5982 br(Assembler::GT, loop);
5983
5984 // Bang down shadow pages too.
5985 // At this point, (tmp-0) is the last address touched, so don't
5986 // touch it again. (It was touched as (tmp-pagesize) but then tmp
5987 // was post-decremented.) Skip this address by starting at i=1, and
5988 // touch a few more pages below. N.B. It is important to touch all
5989 // the way down to and including i=StackShadowPages.
5990 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
5991     // This could be a move of any size, but it can serve as a debugging
5992     // crumb, so the bigger the better.
5993 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5994 str(size, Address(tmp));
5995 }
5996 }
5997
5998 // Move the address of the polling page into dest.
5999 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
6000 ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
6001 }
6002
6003 // Read the polling page. The address of the polling page must
6004 // already be in r.
6005 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
6006 address mark;
6007 {
6008 InstructionMark im(this);
6009 code_section()->relocate(inst_mark(), rtype);
6010 ldrw(zr, Address(r, 0));
6011 mark = inst_mark();
6012 }
6013 verify_cross_modify_fence_not_required();
6014 return mark;
6015 }
6016
6017 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
6018 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
6019 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
6020 uint64_t dest_page = (uint64_t)dest.target() >> 12;
6021 int64_t offset_low = dest_page - low_page;
6022 int64_t offset_high = dest_page - high_page;
6023
6024 assert(is_valid_AArch64_address(dest.target()), "bad address");
6025 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
6026
6027 InstructionMark im(this);
6028 code_section()->relocate(inst_mark(), dest.rspec());
6029 // 8143067: Ensure that the adrp can reach the dest from anywhere within
6030   // the code cache, so that if it is relocated we know it will still reach the target.
6031 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
6032 _adrp(reg1, dest.target());
6033 } else {
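    // Out of ADRP range: form an address whose low 32 bits match the target
    // and whose bits [32, 48) are borrowed from the current pc, so the adrp
    // is always in range, then patch bits [32, 48) with a movk.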
6034 uint64_t target = (uint64_t)dest.target();
6035 uint64_t adrp_target
6036 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
6037
6038 _adrp(reg1, (address)adrp_target);
6039 movk(reg1, target >> 32, 32);
6040 }
6041 byte_offset = (uint64_t)dest.target() & 0xfff;
6042 }
6043
6044 void MacroAssembler::load_byte_map_base(Register reg) {
6045 #if INCLUDE_CDS
6046 if (AOTCodeCache::is_on_for_dump()) {
6047 address byte_map_base_adr = AOTRuntimeConstants::card_table_base_address();
6048 lea(reg, ExternalAddress(byte_map_base_adr));
6049 ldr(reg, Address(reg));
6050 return;
6051 }
6052 #endif
6053 CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
6054
6055 // Strictly speaking the card table base isn't an address at all, and it might
6056 // even be negative. It is thus materialised as a constant.
6057 mov(reg, (uint64_t)ctbs->card_table_base_const());
6058 }
6059
6060 void MacroAssembler::load_aotrc_address(Register reg, address a) {
6061 #if INCLUDE_CDS
6062 assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
6063 if (AOTCodeCache::is_on_for_dump()) {
6064 // all aotrc field addresses should be registered in the AOTCodeCache address table
6065 lea(reg, ExternalAddress(a));
6066 } else {
6067 mov(reg, (uint64_t)a);
6068 }
6069 #else
6070 ShouldNotReachHere();
6071 #endif
6072 }
6073
6074 #ifdef ASSERT
6075 void MacroAssembler::build_frame(int framesize) {
6076 build_frame(framesize, false);
6077 }
6078 #endif
6079
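// Allocates a frame of exactly framesize bytes and spills rfp/lr. Small
// frames (framesize < 2^9 + 16) drop sp first and store rfp/lr at the top
// of the new frame; larger frames push rfp/lr with a pre-indexed store and
// then extend sp, going through rscratch1 when the adjustment does not fit
// an immediate. With zap_rfp_lr_spills (debug builds only), a poison
// pattern is stored instead of rfp/lr so that stale reads of these slots
// can be caught.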
6080 void MacroAssembler::build_frame(int framesize DEBUG_ONLY(COMMA bool zap_rfp_lr_spills)) {
6081 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
6082 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
6083 protect_return_address();
6084 if (framesize < ((1 << 9) + 2 * wordSize)) {
6085 sub(sp, sp, framesize);
6086 if (DEBUG_ONLY(zap_rfp_lr_spills ||) false) {
6087 mov_immediate64(rscratch1, ((uint64_t)badRegWordVal) << 32 | (uint64_t)badRegWordVal);
6088 stp(rscratch1, rscratch1, Address(sp, framesize - 2 * wordSize));
6089 } else {
6090 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
6091 }
6092 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
6093 } else {
6094 if (DEBUG_ONLY(zap_rfp_lr_spills ||) false) {
6095 mov_immediate64(rscratch1, ((uint64_t)badRegWordVal) << 32 | (uint64_t)badRegWordVal);
6096 stp(rscratch1, rscratch1, Address(pre(sp, -2 * wordSize)));
6097 } else {
6098 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
6099 }
6100 if (PreserveFramePointer) mov(rfp, sp);
6101 if (framesize < ((1 << 12) + 2 * wordSize))
6102 sub(sp, sp, framesize - 2 * wordSize);
6103 else {
6104 mov(rscratch1, framesize - 2 * wordSize);
6105 sub(sp, sp, rscratch1);
6106 }
6107 }
6108 verify_cross_modify_fence_not_required();
6109 }
6110
6111 void MacroAssembler::remove_frame(int framesize) {
6112 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
6113 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
6114 if (framesize < ((1 << 9) + 2 * wordSize)) {
6115 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
6116 add(sp, sp, framesize);
6117 } else {
6118 if (framesize < ((1 << 12) + 2 * wordSize))
6119 add(sp, sp, framesize - 2 * wordSize);
6120 else {
6121 mov(rscratch1, framesize - 2 * wordSize);
6122 add(sp, sp, rscratch1);
6123 }
6124 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6125 }
6126 authenticate_return_address();
6127 }
6128
6129 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6130 if (needs_stack_repair) {
6131 // The method has a scalarized entry point (where fields of value object arguments
6132 // are passed through registers and stack), and a non-scalarized entry point (where
6133 // value object arguments are given as oops). The non-scalarized entry point will
6134 // first load each field of value object arguments and store them in registers and on
6135 // the stack in a way compatible with the scalarized entry point. To do so, some extra
6136 // stack space might be reserved (if argument registers are not enough). On leaving the
6137 // method, this space must be freed.
6138 //
6139 // In case we used the non-scalarized entry point the stack looks like this:
6140 //
6141 // | Arguments from caller |
6142 // |---------------------------| <-- caller's SP
6143 // | Saved LR #1 |
6144 // | Saved FP #1 |
6145 // |---------------------------|
6146 // | Extension space for |
6147 // | inline arg (un)packing |
6148 // |---------------------------| <-- start of this method's frame
6149 // | Saved LR #2 |
6150 // | Saved FP #2 |
6151 // |---------------------------| <-- FP (with -XX:+PreserveFramePointer)
6152 // | sp_inc |
6153 // | method locals |
6154 // |---------------------------| <-- SP
6155 //
6156 // There are two copies of FP and LR on the stack. They will be identical at
6157 // first, but that can change.
6158 // If the caller has been deoptimized, LR #1 will be patched to point at the
6159 // deopt blob, and LR #2 will still point into the old method.
6160 // If the saved FP (x29) was not used as the frame pointer, but to store an
6161 // oop, the GC will be aware only of FP #1 as the spilled location of x29 and
6162 // will fix only this one. Overall, FP/LR #2 are not reliable and are simply
6163 // needed to add space between the extension space and the locals, as there
6164 // would be between the real arguments and the locals if we don't need to
6165 // do unpacking (from the scalarized entry point).
6166 //
6167 // When restoring, one must then load FP #1 into x29, and LR #1 into x30,
6168 // while keeping in mind that from the scalarized entry point, there will be
6169 // only one copy of each. Indeed, in the case we used the scalarized calling
6170 // convention, the stack looks like this:
6171 //
6172 // | Arguments from caller |
6173 // |---------------------------| <-- caller's SP / start of this method's frame
6174 // | Saved LR |
6175 // | Saved FP |
6176 // |---------------------------| <-- FP (with -XX:+PreserveFramePointer)
6177 // | sp_inc |
6178 // | method locals |
6179 // |---------------------------| <-- SP
6180 //
6181 // The sp_inc stack slot holds the total size of the frame including the
6182 // extension space minus two words for the saved FP and LR. That is how to
6183 // find FP/LR #1. This size is expressed in bytes. Be careful when using it
6184 // from C++ in pointer arithmetic; you might need to divide it by wordSize.
6185 //
6186     // One can find sp_inc because the start of the method's frame is SP + initial_framesize.
6187
6188 int sp_inc_offset = initial_framesize - 3 * wordSize; // Immediately below saved LR and FP
6189
6190 ldr(rscratch1, Address(sp, sp_inc_offset));
6191 add(sp, sp, rscratch1);
6192 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6193 } else {
6194 remove_frame(initial_framesize);
6195 }
6196 }
6197
6198 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
6199 int real_frame_size = frame_size + sp_inc;
6200 assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
6201 assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
6202 assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6203
6204 int sp_inc_offset = frame_size - 3 * wordSize; // Immediately below saved LR and FP
6205
6206 // Subtract two words for the saved FP and LR as these will be popped
6207 // separately. See remove_frame above.
6208 mov(rscratch1, real_frame_size - 2*wordSize);
6209 str(rscratch1, Address(sp, sp_inc_offset));
6210 }
6211
6212 // This method counts leading positive bytes (highest bit not set) in the provided byte array.
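// Returns the current pc() on success, or nullptr if one of the required
// out-of-line stub calls could not be emitted.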
6213 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
6214   // The simple and most common case, a small aligned array that is not at
6215   // the end of a memory page, is handled here. All other cases are in the stub.
6216 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
6217 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
6218 assert_different_registers(ary1, len, result);
6219
6220 mov(result, len);
6221 cmpw(len, 0);
6222 br(LE, DONE);
6223 cmpw(len, 4 * wordSize);
6224   br(GE, STUB_LONG); // if size >= 32 go to the stub
6225
6226 int shift = 64 - exact_log2(os::vm_page_size());
6227 lsl(rscratch1, ary1, shift);
6228 mov(rscratch2, (size_t)(4 * wordSize) << shift);
6229 adds(rscratch2, rscratch1, rscratch2); // At end of page?
6230   br(CS, STUB); // if at the end of a page go to the stub
6231 subs(len, len, wordSize);
6232 br(LT, END);
6233
6234 BIND(LOOP);
6235 ldr(rscratch1, Address(post(ary1, wordSize)));
6236 tst(rscratch1, UPPER_BIT_MASK);
6237 br(NE, SET_RESULT);
6238 subs(len, len, wordSize);
6239 br(GE, LOOP);
6240 cmpw(len, -wordSize);
6241 br(EQ, DONE);
6242
6243 BIND(END);
6244 ldr(rscratch1, Address(ary1));
6245 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
6246 lslv(rscratch1, rscratch1, rscratch2);
6247 tst(rscratch1, UPPER_BIT_MASK);
6248 br(NE, SET_RESULT);
6249 b(DONE);
6250
6251 BIND(STUB);
6252 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
6253 assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
6254 address tpc1 = trampoline_call(count_pos);
6255 if (tpc1 == nullptr) {
6256 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
6257 postcond(pc() == badAddress);
6258 return nullptr;
6259 }
6260 b(DONE);
6261
6262 BIND(STUB_LONG);
6263 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
6264 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
6265 address tpc2 = trampoline_call(count_pos_long);
6266 if (tpc2 == nullptr) {
6267 DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
6268 postcond(pc() == badAddress);
6269 return nullptr;
6270 }
6271 b(DONE);
6272
6273 BIND(SET_RESULT);
6274
6275 add(len, len, wordSize);
6276 sub(result, result, len);
6277
6278 BIND(DONE);
6279 postcond(pc() != badAddress);
6280 return pc();
6281 }
6282
6283 // Clobbers: rscratch1, rscratch2, rflags
6284 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
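// Returns the current pc() on success, or nullptr if the large-array stub
// call could not be emitted.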
6285 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
6286 Register tmp4, Register tmp5, Register result,
6287 Register cnt1, int elem_size) {
6288 Label DONE, SAME;
6289 Register tmp1 = rscratch1;
6290 Register tmp2 = rscratch2;
6291 int elem_per_word = wordSize/elem_size;
6292 int log_elem_size = exact_log2(elem_size);
6293 int klass_offset = arrayOopDesc::klass_offset_in_bytes();
6294 int length_offset = arrayOopDesc::length_offset_in_bytes();
6295 int base_offset
6296 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
6297   // If the length offset is not aligned to 8 bytes, we align it
6298   // down. This is valid because the aligned-down offset then points
6299   // at the klass field, which is the same for both arrays of a given
6300   // type.
6301 int start_offset = align_down(length_offset, BytesPerWord);
6302 int extra_length = base_offset - start_offset;
6303 assert(start_offset == length_offset || start_offset == klass_offset,
6304 "start offset must be 8-byte-aligned or be the klass offset");
6305 assert(base_offset != start_offset, "must include the length field");
6306 extra_length = extra_length / elem_size; // We count in elements, not bytes.
6307 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
6308
6309 assert(elem_size == 1 || elem_size == 2, "must be char or byte");
6310 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6311
6312 #ifndef PRODUCT
6313 {
6314 const char kind = (elem_size == 2) ? 'U' : 'L';
6315 char comment[64];
6316 os::snprintf_checked(comment, sizeof comment, "array_equals%c{", kind);
6317 BLOCK_COMMENT(comment);
6318 }
6319 #endif
6320
6321 // if (a1 == a2)
6322 // return true;
6323 cmpoop(a1, a2); // May have read barriers for a1 and a2.
6324 br(EQ, SAME);
6325
6326 if (UseSimpleArrayEquals) {
6327 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
6328 // if (a1 == nullptr || a2 == nullptr)
6329 // return false;
6330     // (a1 & a2) == 0 means that at least one pointer is null, or that
6331     // both have very-rare-or-even-probably-impossible values, so we can
6332     // save one branch in most cases
6333 tst(a1, a2);
6334 mov(result, false);
6335 br(EQ, A_MIGHT_BE_NULL);
6336 // if (a1.length != a2.length)
6337 // return false;
6338 bind(A_IS_NOT_NULL);
6339 ldrw(cnt1, Address(a1, length_offset));
6340 ldrw(tmp5, Address(a2, length_offset));
6341 cmp(cnt1, tmp5);
6342 br(NE, DONE); // If lengths differ, return false
6343 // Increase loop counter by diff between base- and actual start-offset.
6344 addw(cnt1, cnt1, extra_length);
6345 lea(a1, Address(a1, start_offset));
6346 lea(a2, Address(a2, start_offset));
6347 // Check for short strings, i.e. smaller than wordSize.
6348 subs(cnt1, cnt1, elem_per_word);
6349 br(Assembler::LT, SHORT);
6350 // Main 8 byte comparison loop.
6351 bind(NEXT_WORD); {
6352 ldr(tmp1, Address(post(a1, wordSize)));
6353 ldr(tmp2, Address(post(a2, wordSize)));
6354 subs(cnt1, cnt1, elem_per_word);
6355 eor(tmp5, tmp1, tmp2);
6356 cbnz(tmp5, DONE);
6357 } br(GT, NEXT_WORD);
6358 // Last longword. In the case where length == 4 we compare the
6359 // same longword twice, but that's still faster than another
6360 // conditional branch.
6361 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6362 // length == 4.
6363 if (log_elem_size > 0)
6364 lsl(cnt1, cnt1, log_elem_size);
6365 ldr(tmp3, Address(a1, cnt1));
6366 ldr(tmp4, Address(a2, cnt1));
6367 eor(tmp5, tmp3, tmp4);
6368 cbnz(tmp5, DONE);
6369 b(SAME);
6370 bind(A_MIGHT_BE_NULL);
6371     // if both a1 and a2 are non-null, proceed with the loads
6372 cbz(a1, DONE);
6373 cbz(a2, DONE);
6374 b(A_IS_NOT_NULL);
6375 bind(SHORT);
6376
6377 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
6378 {
6379 ldrw(tmp1, Address(post(a1, 4)));
6380 ldrw(tmp2, Address(post(a2, 4)));
6381 eorw(tmp5, tmp1, tmp2);
6382 cbnzw(tmp5, DONE);
6383 }
6384 bind(TAIL03);
6385 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
6386 {
6387 ldrh(tmp3, Address(post(a1, 2)));
6388 ldrh(tmp4, Address(post(a2, 2)));
6389 eorw(tmp5, tmp3, tmp4);
6390 cbnzw(tmp5, DONE);
6391 }
6392 bind(TAIL01);
6393 if (elem_size == 1) { // Only needed when comparing byte arrays.
6394 tbz(cnt1, 0, SAME); // 0-1 bytes left.
6395 {
6396 ldrb(tmp1, a1);
6397 ldrb(tmp2, a2);
6398 eorw(tmp5, tmp1, tmp2);
6399 cbnzw(tmp5, DONE);
6400 }
6401 }
6402 } else {
6403 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
6404 CSET_EQ, LAST_CHECK;
6405 mov(result, false);
6406 cbz(a1, DONE);
6407 ldrw(cnt1, Address(a1, length_offset));
6408 cbz(a2, DONE);
6409 ldrw(tmp5, Address(a2, length_offset));
6410 cmp(cnt1, tmp5);
6411 br(NE, DONE); // If lengths differ, return false
6412 // Increase loop counter by diff between base- and actual start-offset.
6413 addw(cnt1, cnt1, extra_length);
6414
    // On most CPUs a2 is (surprisingly) still "locked" by the ldrw
    // above, so it's faster to perform another branch here before
    // comparing a1 and a2.
6417 cmp(cnt1, (u1)elem_per_word);
6418 br(LE, SHORT); // short or same
6419 ldr(tmp3, Address(pre(a1, start_offset)));
6420 subs(zr, cnt1, stubBytesThreshold);
6421 br(GE, STUB);
6422 ldr(tmp4, Address(pre(a2, start_offset)));
6423 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6424
6425 // Main 16 byte comparison loop with 2 exits
6426 bind(NEXT_DWORD); {
6427 ldr(tmp1, Address(pre(a1, wordSize)));
6428 ldr(tmp2, Address(pre(a2, wordSize)));
6429 subs(cnt1, cnt1, 2 * elem_per_word);
6430 br(LE, TAIL);
6431 eor(tmp4, tmp3, tmp4);
6432 cbnz(tmp4, DONE);
6433 ldr(tmp3, Address(pre(a1, wordSize)));
6434 ldr(tmp4, Address(pre(a2, wordSize)));
6435 cmp(cnt1, (u1)elem_per_word);
6436 br(LE, TAIL2);
6437 cmp(tmp1, tmp2);
6438 } br(EQ, NEXT_DWORD);
6439 b(DONE);
6440
6441 bind(TAIL);
6442 eor(tmp4, tmp3, tmp4);
6443 eor(tmp2, tmp1, tmp2);
6444 lslv(tmp2, tmp2, tmp5);
6445 orr(tmp5, tmp4, tmp2);
6446 cmp(tmp5, zr);
6447 b(CSET_EQ);
6448
6449 bind(TAIL2);
6450 eor(tmp2, tmp1, tmp2);
6451 cbnz(tmp2, DONE);
6452 b(LAST_CHECK);
6453
6454 bind(STUB);
6455 ldr(tmp4, Address(pre(a2, start_offset)));
6456 if (elem_size == 2) { // convert to byte counter
6457 lsl(cnt1, cnt1, 1);
6458 }
6459 eor(tmp5, tmp3, tmp4);
6460 cbnz(tmp5, DONE);
6461 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
    assert(stub.target() != nullptr, "large_array_equals stub has not been generated");
6463 address tpc = trampoline_call(stub);
6464 if (tpc == nullptr) {
6465 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
6466 postcond(pc() == badAddress);
6467 return nullptr;
6468 }
6469 b(DONE);
6470
6475 bind(SHORT);
6476 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6477 ldr(tmp3, Address(a1, start_offset));
6478 ldr(tmp4, Address(a2, start_offset));
6479 bind(LAST_CHECK);
6480 eor(tmp4, tmp3, tmp4);
6481 lslv(tmp5, tmp4, tmp5);
6482 cmp(tmp5, zr);
6483 bind(CSET_EQ);
6484 cset(result, EQ);
6485 b(DONE);
6486 }
6487
6488 bind(SAME);
6489 mov(result, true);
6490 // That's it.
6491 bind(DONE);
6492
6493 BLOCK_COMMENT("} array_equals");
6494 postcond(pc() != badAddress);
6495 return pc();
6496 }
6497
6498 // Compare Strings
6499
6500 // For Strings we're passed the address of the first characters in a1
6501 // and a2 and the length in cnt1.
6502 // There are two implementations. For arrays >= 8 bytes, all
6503 // comparisons (including the final one, which may overlap) are
6504 // performed 8 bytes at a time. For strings < 8 bytes, we compare a
// word, then a halfword, and then a byte.
6506
6507 void MacroAssembler::string_equals(Register a1, Register a2,
6508 Register result, Register cnt1)
6509 {
6510 Label SAME, DONE, SHORT, NEXT_WORD;
6511 Register tmp1 = rscratch1;
6512 Register tmp2 = rscratch2;
6513
6514 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6515
  BLOCK_COMMENT("string_equalsL{");
6523
6524 mov(result, false);
6525
6526 // Check for short strings, i.e. smaller than wordSize.
6527 subs(cnt1, cnt1, wordSize);
6528 br(Assembler::LT, SHORT);
6529 // Main 8 byte comparison loop.
6530 bind(NEXT_WORD); {
6531 ldr(tmp1, Address(post(a1, wordSize)));
6532 ldr(tmp2, Address(post(a2, wordSize)));
6533 subs(cnt1, cnt1, wordSize);
6534 eor(tmp1, tmp1, tmp2);
6535 cbnz(tmp1, DONE);
6536 } br(GT, NEXT_WORD);
  // Last longword. In the case where the length is a multiple of 8
  // we compare the same longword twice, but that's still faster than
  // another conditional branch.
  // cnt1 is now in the range -8..0; -8 only happens when length == 8.
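  // E.g. length == 12: after one 8-byte iteration cnt1 == -4 and a1
  // has advanced by 8, so the final ldr reads bytes 4..11, re-checking
  // bytes 4..7 that the loop already compared.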
6542 ldr(tmp1, Address(a1, cnt1));
6543 ldr(tmp2, Address(a2, cnt1));
6544 eor(tmp2, tmp1, tmp2);
6545 cbnz(tmp2, DONE);
6546 b(SAME);
6547
6548 bind(SHORT);
6549 Label TAIL03, TAIL01;
6550
6551 tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
6552 {
6553 ldrw(tmp1, Address(post(a1, 4)));
6554 ldrw(tmp2, Address(post(a2, 4)));
6555 eorw(tmp1, tmp1, tmp2);
6556 cbnzw(tmp1, DONE);
6557 }
6558 bind(TAIL03);
6559 tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
6560 {
6561 ldrh(tmp1, Address(post(a1, 2)));
6562 ldrh(tmp2, Address(post(a2, 2)));
6563 eorw(tmp1, tmp1, tmp2);
6564 cbnzw(tmp1, DONE);
6565 }
6566 bind(TAIL01);
6567 tbz(cnt1, 0, SAME); // 0-1 bytes left.
6568 {
6569 ldrb(tmp1, a1);
6570 ldrb(tmp2, a2);
6571 eorw(tmp1, tmp1, tmp2);
6572 cbnzw(tmp1, DONE);
6573 }
6574 // Arrays are equal.
6575 bind(SAME);
6576 mov(result, true);
6577
6578 // That's it.
6579 bind(DONE);
6580 BLOCK_COMMENT("} string_equals");
6581 }
6582
6583
6584 // The size of the blocks erased by the zero_blocks stub. We must
6585 // handle anything smaller than this ourselves in zero_words().
6586 const int MacroAssembler::zero_words_block_size = 8;
6587
6588 // zero_words() is used by C2 ClearArray patterns and by
6589 // C1_MacroAssembler. It is as small as possible, handling small word
6590 // counts locally and delegating anything larger to the zero_blocks
6591 // stub. It is expanded many times in compiled code, so it is
6592 // important to keep it short.
6593
6594 // ptr: Address of a buffer to be zeroed.
6595 // cnt: Count in HeapWords.
6596 //
6597 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
6598 address MacroAssembler::zero_words(Register ptr, Register cnt)
6599 {
6600 assert(is_power_of_2(zero_words_block_size), "adjust this");
6601
6602 BLOCK_COMMENT("zero_words {");
6603 assert(ptr == r10 && cnt == r11, "mismatch in register usage");
6606
6607 subs(rscratch1, cnt, zero_words_block_size);
6608 Label around;
6609 br(LO, around);
6610 {
6611 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
6612 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
6613 // Make sure this is a C2 compilation. C1 allocates space only for
6614 // trampoline stubs generated by Call LIR ops, and in any case it
6615 // makes sense for a C1 compilation task to proceed as quickly as
6616 // possible.
6617 CompileTask* task;
6618 if (StubRoutines::aarch64::complete()
6619 && Thread::current()->is_Compiler_thread()
6620 && (task = ciEnv::current()->task())
6621 && is_c2_compile(task->comp_level())) {
6622 address tpc = trampoline_call(zero_blocks);
6623 if (tpc == nullptr) {
6624 DEBUG_ONLY(reset_labels(around));
6625 return nullptr;
6626 }
6627 } else {
6628 far_call(zero_blocks);
6629 }
6630 }
6631 bind(around);
6632
6633 // We have a few words left to do. zero_blocks has adjusted r10 and r11
6634 // for us.
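  // The residue is 0..7 words. Each set bit of cnt gets straight-line
  // stores: bit 2 -> two stps (4 words), bit 1 -> one stp, bit 0 -> one
  // str. E.g. a residue of 7 words needs all three groups.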
6635 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
6636 Label l;
6637 tbz(cnt, exact_log2(i), l);
6638 for (int j = 0; j < i; j += 2) {
6639 stp(zr, zr, post(ptr, 2 * BytesPerWord));
6640 }
6641 bind(l);
6642 }
6643 {
6644 Label l;
6645 tbz(cnt, 0, l);
6646 str(zr, Address(ptr));
6647 bind(l);
6648 }
6649
6650 BLOCK_COMMENT("} zero_words");
6651 return pc();
6652 }
6653
// base: Address of a buffer to be zeroed, 8-byte aligned.
6655 // cnt: Immediate count in HeapWords.
6656 //
6657 // r10, r11, rscratch1, and rscratch2 are clobbered.
6658 address MacroAssembler::zero_words(Register base, uint64_t cnt)
6659 {
6660 assert(wordSize <= BlockZeroingLowLimit,
6661 "increase BlockZeroingLowLimit");
6662 address result = nullptr;
6663 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
6664 #ifndef PRODUCT
6665 {
6666 char buf[64];
6667 os::snprintf_checked(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
6668 BLOCK_COMMENT(buf);
6669 }
6670 #endif
6671 if (cnt >= 16) {
6672 uint64_t loops = cnt/16;
6673 if (loops > 1) {
6674 mov(rscratch2, loops - 1);
6675 }
6676 {
6677 Label loop;
6678 bind(loop);
6679 for (int i = 0; i < 16; i += 2) {
6680 stp(zr, zr, Address(base, i * BytesPerWord));
6681 }
6682 add(base, base, 16 * BytesPerWord);
6683 if (loops > 1) {
6684 subs(rscratch2, rscratch2, 1);
6685 br(GE, loop);
6686 }
6687 }
6688 }
6689 cnt %= 16;
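    // At most 15 words remain. Any odd word is stored first so that the
    // rest can be covered by stp pairs; e.g. cnt == 5 emits one str
    // followed by two stps.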
6690 int i = cnt & 1; // store any odd word to start
6691 if (i) str(zr, Address(base));
6692 for (; i < (int)cnt; i += 2) {
6693 stp(zr, zr, Address(base, i * wordSize));
6694 }
6695 BLOCK_COMMENT("} zero_words");
6696 result = pc();
6697 } else {
6698 mov(r10, base); mov(r11, cnt);
6699 result = zero_words(r10, r11);
6700 }
6701 return result;
6702 }
6703
6704 // Zero blocks of memory by using DC ZVA.
6705 //
6706 // Aligns the base address first sufficiently for DC ZVA, then uses
6707 // DC ZVA repeatedly for every full block. cnt is the size to be
6708 // zeroed in HeapWords. Returns the count of words left to be zeroed
6709 // in cnt.
6710 //
6711 // NOTE: This is intended to be used in the zero_blocks() stub. If
6712 // you want to use it elsewhere, note that cnt must be >= 2*zva_length.
6713 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
6714 Register tmp = rscratch1;
6715 Register tmp2 = rscratch2;
6716 int zva_length = VM_Version::zva_length();
6717 Label initial_table_end, loop_zva;
6718 Label fini;
6719
  // Base must be 16-byte aligned. If not, just return and let the caller handle it.
6721 tst(base, 0x0f);
6722 br(Assembler::NE, fini);
6723 // Align base with ZVA length.
6724 neg(tmp, base);
6725 andr(tmp, tmp, zva_length - 1);
6726
6727 // tmp: the number of bytes to be filled to align the base with ZVA length.
6728 add(base, base, tmp);
6729 sub(cnt, cnt, tmp, Assembler::ASR, 3);
6730 adr(tmp2, initial_table_end);
6731 sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
6732 br(tmp2);
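  // Computed branch into the stp table below: each stp zeroes 16 bytes
  // and occupies 4 bytes of code, so we branch back tmp / 16
  // instructions (tmp / 4 bytes) from initial_table_end, executing
  // exactly the stores needed; e.g. tmp == 48 branches back 3 stps.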
6733
6734 for (int i = -zva_length + 16; i < 0; i += 16)
6735 stp(zr, zr, Address(base, i));
6736 bind(initial_table_end);
6737
6738 sub(cnt, cnt, zva_length >> 3);
6739 bind(loop_zva);
6740 dc(Assembler::ZVA, base);
6741 subs(cnt, cnt, zva_length >> 3);
6742 add(base, base, zva_length);
6743 br(Assembler::GE, loop_zva);
6744 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
6745 bind(fini);
6746 }
6747
// base: Address of a buffer to be filled, 8-byte aligned.
// cnt: Count in 8-byte units.
6750 // value: Value to be filled with.
6751 // base will point to the end of the buffer after filling.
6752 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
6753 {
6754 // Algorithm:
6755 //
6756 // if (cnt == 0) {
6757 // return;
6758 // }
6759 // if ((p & 8) != 0) {
6760 // *p++ = v;
6761 // }
6762 //
6763 // scratch1 = cnt & 14;
6764 // cnt -= scratch1;
6765 // p += scratch1;
6766 // switch (scratch1 / 2) {
6767 // do {
6768 // cnt -= 16;
6769 // p[-16] = v;
6770 // p[-15] = v;
6771 // case 7:
6772 // p[-14] = v;
6773 // p[-13] = v;
6774 // case 6:
6775 // p[-12] = v;
6776 // p[-11] = v;
6777 // // ...
6778 // case 1:
6779 // p[-2] = v;
6780 // p[-1] = v;
6781 // case 0:
6782 // p += 16;
6783 // } while (cnt);
6784 // }
6785 // if ((cnt & 1) == 1) {
6786 // *p++ = v;
6787 // }
6788
6789 assert_different_registers(base, cnt, value, rscratch1, rscratch2);
6790
6791 Label fini, skip, entry, loop;
6792 const int unroll = 8; // Number of stp instructions we'll unroll
6793
6794 cbz(cnt, fini);
6795 tbz(base, 3, skip);
6796 str(value, Address(post(base, 8)));
6797 sub(cnt, cnt, 1);
6798 bind(skip);
6799
6800 andr(rscratch1, cnt, (unroll-1) * 2);
6801 sub(cnt, cnt, rscratch1);
6802 add(base, base, rscratch1, Assembler::LSL, 3);
6803 adr(rscratch2, entry);
6804 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
6805 br(rscratch2);
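  // As in the pseudocode switch above: each stp is 4 bytes of code and
  // fills two words, so branching back rscratch1 * 2 bytes from 'entry'
  // selects exactly the trailing stores still needed.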
6806
6807 bind(loop);
6808 add(base, base, unroll * 16);
6809 for (int i = -unroll; i < 0; i++)
6810 stp(value, value, Address(base, i * 16));
6811 bind(entry);
6812 subs(cnt, cnt, unroll * 2);
6813 br(Assembler::GE, loop);
6814
6815 tbz(cnt, 0, fini);
6816 str(value, Address(post(base, 8)));
6817 bind(fini);
6818 }
6819
6820 // Intrinsic for
6821 //
6822 // - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6823 // Encodes char[] to byte[] in ISO-8859-1
6824 //
6825 // - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6826 // Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
6827 //
6828 // - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6829 // Encodes char[] to byte[] in ASCII
6830 //
6831 // This version always returns the number of characters copied, and does not
6832 // clobber the 'len' register. A successful copy will complete with the post-
6833 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
6834 // post-condition: 0 <= 'res' < 'len'.
6835 //
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
6837 // degrade performance (on Ampere Altra - Neoverse N1), to an extent
6838 // beyond the acceptable, even though the footprint would be smaller.
6839 // Using 'umaxv' in the ASCII-case comes with a small penalty but does
6840 // avoid additional bloat.
6841 //
6842 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags
6843 void MacroAssembler::encode_iso_array(Register src, Register dst,
6844 Register len, Register res, bool ascii,
6845 FloatRegister vtmp0, FloatRegister vtmp1,
6846 FloatRegister vtmp2, FloatRegister vtmp3,
6847 FloatRegister vtmp4, FloatRegister vtmp5)
6848 {
6849 Register cnt = res;
6850 Register max = rscratch1;
6851 Register chk = rscratch2;
6852
6853 prfm(Address(src), PLDL1STRM);
6854 movw(cnt, len);
6855
6856 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
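  // Per 32-char iteration, as a sketch: four 8H vectors are loaded;
  // uzp1 gathers the 32 low bytes, while orr plus uzp2 collapse the 32
  // high bytes into one vector whose any-set-bit test rejects
  // non-Latin-1 input. In ASCII mode the sign bits of the low bytes
  // are checked as well.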
6857
6858 Label LOOP_32, DONE_32, FAIL_32;
6859
6860 BIND(LOOP_32);
6861 {
6862 cmpw(cnt, 32);
6863 br(LT, DONE_32);
6864 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
6865 // Extract lower bytes.
6866 FloatRegister vlo0 = vtmp4;
6867 FloatRegister vlo1 = vtmp5;
6868 uzp1(vlo0, T16B, vtmp0, vtmp1);
6869 uzp1(vlo1, T16B, vtmp2, vtmp3);
6870 // Merge bits...
6871 orr(vtmp0, T16B, vtmp0, vtmp1);
6872 orr(vtmp2, T16B, vtmp2, vtmp3);
6873 // Extract merged upper bytes.
6874 FloatRegister vhix = vtmp0;
6875 uzp2(vhix, T16B, vtmp0, vtmp2);
6876 // ISO-check on hi-parts (all zero).
6877 // ASCII-check on lo-parts (no sign).
6878 FloatRegister vlox = vtmp1; // Merge lower bytes.
6879 ASCII(orr(vlox, T16B, vlo0, vlo1));
6880 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox));
6881 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox));
6882 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0));
6883 ASCII(orr(chk, chk, max));
6884 cbnz(chk, FAIL_32);
6885 subw(cnt, cnt, 32);
6886 st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
6887 b(LOOP_32);
6888 }
6889 BIND(FAIL_32);
6890 sub(src, src, 64);
6891 BIND(DONE_32);
6892
6893 Label LOOP_8, SKIP_8;
6894
6895 BIND(LOOP_8);
6896 {
6897 cmpw(cnt, 8);
6898 br(LT, SKIP_8);
6899 FloatRegister vhi = vtmp0;
6900 FloatRegister vlo = vtmp1;
6901 ld1(vtmp3, T8H, src);
6902 uzp1(vlo, T16B, vtmp3, vtmp3);
6903 uzp2(vhi, T16B, vtmp3, vtmp3);
6904 // ISO-check on hi-parts (all zero).
6905 // ASCII-check on lo-parts (no sign).
6906 ASCII(cm(LT, vtmp2, T16B, vlo));
6907 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2));
6908 ASCII(umov(max, vtmp2, B, 0));
6909 ASCII(orr(chk, chk, max));
6910 cbnz(chk, SKIP_8);
6911
6912 strd(vlo, Address(post(dst, 8)));
6913 subw(cnt, cnt, 8);
6914 add(src, src, 16);
6915 b(LOOP_8);
6916 }
6917 BIND(SKIP_8);
6918
6919 #undef ASCII
6920
6921 Label LOOP, DONE;
6922
6923 cbz(cnt, DONE);
6924 BIND(LOOP);
6925 {
6926 Register chr = rscratch1;
6927 ldrh(chr, Address(post(src, 2)));
6928 tst(chr, ascii ? 0xff80 : 0xff00);
6929 br(NE, DONE);
6930 strb(chr, Address(post(dst, 1)));
6931 subs(cnt, cnt, 1);
6932 br(GT, LOOP);
6933 }
6934 BIND(DONE);
6935 // Return index where we stopped.
6936 subw(res, len, cnt);
6937 }
6938
6939 // Inflate byte[] array to char[].
6940 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6
6941 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
6942 FloatRegister vtmp1, FloatRegister vtmp2,
6943 FloatRegister vtmp3, Register tmp4) {
6944 Label big, done, after_init, to_stub;
6945
6946 assert_different_registers(src, dst, len, tmp4, rscratch1);
6947
6948 fmovd(vtmp1, 0.0);
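  // vtmp1 stays zero throughout: zip1 interleaves each source byte
  // with a zero byte, which is exactly the Latin-1 to little-endian
  // UTF-16 widening we need.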
6949 lsrw(tmp4, len, 3);
6950 bind(after_init);
6951 cbnzw(tmp4, big);
6952 // Short string: less than 8 bytes.
6953 {
6954 Label loop, tiny;
6955
6956 cmpw(len, 4);
6957 br(LT, tiny);
6958 // Use SIMD to do 4 bytes.
6959 ldrs(vtmp2, post(src, 4));
6960 zip1(vtmp3, T8B, vtmp2, vtmp1);
6961 subw(len, len, 4);
6962 strd(vtmp3, post(dst, 8));
6963
6964 cbzw(len, done);
6965
6966 // Do the remaining bytes by steam.
6967 bind(loop);
6968 ldrb(tmp4, post(src, 1));
6969 strh(tmp4, post(dst, 2));
6970 subw(len, len, 1);
6971
6972 bind(tiny);
6973 cbnz(len, loop);
6974
6975 b(done);
6976 }
6977
6978 if (SoftwarePrefetchHintDistance >= 0) {
6979 bind(to_stub);
6980 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
6981 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
6982 address tpc = trampoline_call(stub);
6983 if (tpc == nullptr) {
6984 DEBUG_ONLY(reset_labels(big, done));
6985 postcond(pc() == badAddress);
6986 return nullptr;
6987 }
6988 b(after_init);
6989 }
6990
6991 // Unpack the bytes 8 at a time.
6992 bind(big);
6993 {
6994 Label loop, around, loop_last, loop_start;
6995
6996 if (SoftwarePrefetchHintDistance >= 0) {
6997 const int large_loop_threshold = (64 + 16)/8;
6998 ldrd(vtmp2, post(src, 8));
6999 andw(len, len, 7);
7000 cmp(tmp4, (u1)large_loop_threshold);
7001 br(GE, to_stub);
7002 b(loop_start);
7003
7004 bind(loop);
7005 ldrd(vtmp2, post(src, 8));
7006 bind(loop_start);
7007 subs(tmp4, tmp4, 1);
7008 br(EQ, loop_last);
7009 zip1(vtmp2, T16B, vtmp2, vtmp1);
7010 ldrd(vtmp3, post(src, 8));
7011 st1(vtmp2, T8H, post(dst, 16));
7012 subs(tmp4, tmp4, 1);
7013 zip1(vtmp3, T16B, vtmp3, vtmp1);
7014 st1(vtmp3, T8H, post(dst, 16));
7015 br(NE, loop);
7016 b(around);
7017 bind(loop_last);
7018 zip1(vtmp2, T16B, vtmp2, vtmp1);
7019 st1(vtmp2, T8H, post(dst, 16));
7020 bind(around);
7021 cbz(len, done);
7022 } else {
7023 andw(len, len, 7);
7024 bind(loop);
7025 ldrd(vtmp2, post(src, 8));
7026 sub(tmp4, tmp4, 1);
7027 zip1(vtmp3, T16B, vtmp2, vtmp1);
7028 st1(vtmp3, T8H, post(dst, 16));
7029 cbnz(tmp4, loop);
7030 }
7031 }
7032
7033 // Do the tail of up to 8 bytes.
7034 add(src, src, len);
7035 ldrd(vtmp3, Address(src, -8));
7036 add(dst, dst, len, ext::uxtw, 1);
7037 zip1(vtmp3, T16B, vtmp3, vtmp1);
7038 strq(vtmp3, Address(dst, -16));
7039
7040 bind(done);
7041 postcond(pc() != badAddress);
7042 return pc();
7043 }
7044
7045 // Compress char[] array to byte[].
7046 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Returns the array length if every element in the array can be encoded;
// otherwise, returns the index of the first non-latin1 (> 0xff) character.
7049 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
7050 Register res,
7051 FloatRegister tmp0, FloatRegister tmp1,
7052 FloatRegister tmp2, FloatRegister tmp3,
7053 FloatRegister tmp4, FloatRegister tmp5) {
7054 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
7055 }
7056
// java.lang.Math.round(double a)
7058 // Returns the closest long to the argument, with ties rounding to
7059 // positive infinity. This requires some fiddling for corner
7060 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
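// Example of the negative-tie corner case: fcvtas rounds ties away
// from zero, so fcvtas(-2.5) == -3, while Math.round(-2.5) == -2. For
// small negative inputs we therefore compute fcvtms(src + 0.5):
// fcvtms(-2.0) == -2. Inputs with magnitude >= 2^52 are already
// integral, so fcvtas is exact for them.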
7061 void MacroAssembler::java_round_double(Register dst, FloatRegister src,
7062 FloatRegister ftmp) {
7063 Label DONE;
7064 BLOCK_COMMENT("java_round_double: { ");
7065 fmovd(rscratch1, src);
7066 // Use RoundToNearestTiesAway unless src small and -ve.
7067 fcvtasd(dst, src);
7068 // Test if src >= 0 || abs(src) >= 0x1.0p52
7069 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
7070 mov(rscratch2, julong_cast(0x1.0p52));
7071 cmp(rscratch1, rscratch2);
7072 br(HS, DONE); {
7073 // src < 0 && abs(src) < 0x1.0p52
7074 // src may have a fractional part, so add 0.5
7075 fmovd(ftmp, 0.5);
7076 faddd(ftmp, src, ftmp);
7077 // Convert double to jlong, use RoundTowardsNegative
7078 fcvtmsd(dst, ftmp);
7079 }
7080 bind(DONE);
7081 BLOCK_COMMENT("} java_round_double");
7082 }
7083
7084 void MacroAssembler::java_round_float(Register dst, FloatRegister src,
7085 FloatRegister ftmp) {
7086 Label DONE;
7087 BLOCK_COMMENT("java_round_float: { ");
7088 fmovs(rscratch1, src);
7089 // Use RoundToNearestTiesAway unless src small and -ve.
7090 fcvtassw(dst, src);
7091 // Test if src >= 0 || abs(src) >= 0x1.0p23
7092 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
7093 mov(rscratch2, jint_cast(0x1.0p23f));
7094 cmp(rscratch1, rscratch2);
7095 br(HS, DONE); {
7096 // src < 0 && |src| < 0x1.0p23
7097 // src may have a fractional part, so add 0.5
7098 fmovs(ftmp, 0.5f);
7099 fadds(ftmp, src, ftmp);
7100 // Convert float to jint, use RoundTowardsNegative
7101 fcvtmssw(dst, ftmp);
7102 }
7103 bind(DONE);
7104 BLOCK_COMMENT("} java_round_float");
7105 }
7106
7107 // get_thread() can be called anywhere inside generated code so we
7108 // need to save whatever non-callee save context might get clobbered
7109 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
7110 // the call setup code.
7111 //
7112 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is an ordinary C function.
7114 //
7115 void MacroAssembler::get_thread(Register dst) {
7116 RegSet saved_regs =
7117 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
7118 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
7119
7120 protect_return_address();
7121 push(saved_regs, sp);
7122
7123 mov(lr, ExternalAddress(CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)));
7124 blr(lr);
7125 if (dst != c_rarg0) {
7126 mov(dst, c_rarg0);
7127 }
7128
7129 pop(saved_regs, sp);
7130 authenticate_return_address();
7131 }
7132
7133 #ifdef COMPILER2
7134 // C2 compiled method's prolog code
7135 // Moved here from aarch64.ad to support Valhalla code below
7136 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
7137 if (C->clinit_barrier_on_entry()) {
7138 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
7139
7140 Label L_skip_barrier;
7141
7142 mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
7143 clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
7144 far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
7145 bind(L_skip_barrier);
7146 }
7147
7148 if (C->max_vector_size() > 0) {
7149 reinitialize_ptrue();
7150 }
7151
7152 int bangsize = C->output()->bang_size_in_bytes();
7153 if (C->output()->need_stack_bang(bangsize))
7154 generate_stack_overflow_check(bangsize);
7155
7156 // n.b. frame size includes space for return pc and rfp
7157 const long framesize = C->output()->frame_size_in_bytes();
7158 build_frame(framesize DEBUG_ONLY(COMMA sp_inc != 0));
7159
7160 if (C->needs_stack_repair()) {
7161 save_stack_increment(sp_inc, framesize);
7162 }
7163
7164 if (VerifyStackAtCalls) {
7165 Unimplemented();
7166 }
7167 }
7168 #endif // COMPILER2
7169
7170 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "should only be used when inline types are returned as fields");
7172 // An inline type might be returned. If fields are in registers we
7173 // need to allocate an inline type instance and initialize it with
7174 // the value of the fields.
7175 Label skip;
  // We only need a new buffered inline type if one was not already returned
7177 tbz(r0, 0, skip);
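  // By convention (see the interpreter case below), r0 either holds an
  // already buffered oop (bit 0 clear, since oops are aligned) or the
  // InlineKlass* of the return type tagged with bit 0 set, meaning the
  // field values are still in registers.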
7178 int call_offset = -1;
7179
  // Be careful not to clobber r1-r7, which hold the returned fields.
  // Also do not use callee-saved registers as these may be live in the interpreter.
7182 Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
7183
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. the object size is never zero and is sometimes a constant, and storing the
  // klass pointer after allocating is unnecessary if vk != nullptr; allocate_instance
  // is not aware of these.
7187 Label slow_case;
7188 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it if allocation fails
7190
7191 if (vk != nullptr) {
7192 // Called from C1, where the return type is statically known.
7193 movptr(klass, (intptr_t)vk->get_InlineKlass());
7194 jint lh = vk->layout_helper();
7195 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
7196 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
7197 tlab_allocate(r0, noreg, lh, tmp1, tmp2, slow_case);
7198 } else {
7199 b(slow_case);
7200 }
7201 } else {
    // Called from the interpreter. r0 contains ((the InlineKlass* of the return type) | 0x01)
7203 andr(klass, r0, -2);
7204 if (UseTLAB) {
7205 ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
7206 tst(tmp2, Klass::_lh_instance_slow_path_bit);
7207 br(Assembler::NE, slow_case);
7208 tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
7209 } else {
7210 b(slow_case);
7211 }
7212 }
7213 if (UseTLAB) {
7214 // 2. Initialize buffered inline instance header
7215 Register buffer_obj = r0;
7216 if (UseCompactObjectHeaders) {
7217 ldr(rscratch1, Address(klass, Klass::prototype_header_offset()));
7218 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
7219 } else {
7220 mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
7221 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
7222 store_klass_gap(buffer_obj, zr);
7223 if (vk == nullptr) {
7224 // store_klass corrupts klass, so save it for later use (interpreter case only).
7225 mov(tmp1, klass);
7226 }
7227 store_klass(buffer_obj, klass);
7228 klass = tmp1;
7229 }
7230 // 3. Initialize its fields with an inline class specific handler
7231 if (vk != nullptr) {
7232 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
7233 } else {
7234 ldr(tmp1, Address(klass, InlineKlass::adr_members_offset()));
7235 ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
7236 blr(tmp1);
7237 }
7238
7239 membar(Assembler::StoreStore);
7240 b(skip);
7241 } else {
7242 // Must have already branched to slow_case above.
7243 DEBUG_ONLY(should_not_reach_here());
7244 }
7245 bind(slow_case);
7246 // We failed to allocate a new inline type, fall back to a runtime
7247 // call. Some oop field may be live in some registers but we can't
7248 // tell. That runtime call will take care of preserving them
7249 // across a GC if there's one.
7250 mov(r0, r0_preserved);
7251
7252 if (from_interpreter) {
7253 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
7254 } else {
7255 far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
7256 call_offset = offset();
7257 }
7258 membar(Assembler::StoreStore);
7259
7260 bind(skip);
7261 return call_offset;
7262 }
7263
7264 // Move a value between registers/stack slots and update the reg_state
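// reg_state acts as a small dataflow scoreboard: reg_readonly marks a
// location whose source value has not been consumed yet, reg_writable
// marks a free location, and reg_written marks a completed destination;
// presumably callers keep iterating these helpers until every move has
// been performed.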
7265 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
7266 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
7267 if (reg_state[to->value()] == reg_written) {
7268 return true; // Already written
7269 }
7270
7271 if (from != to && bt != T_VOID) {
7272 if (reg_state[to->value()] == reg_readonly) {
7273 return false; // Not yet writable
7274 }
7275 if (from->is_reg()) {
7276 if (to->is_reg()) {
7277 if (from->is_Register() && to->is_Register()) {
7278 mov(to->as_Register(), from->as_Register());
7279 } else if (from->is_FloatRegister() && to->is_FloatRegister()) {
7280 fmovd(to->as_FloatRegister(), from->as_FloatRegister());
7281 } else {
7282 ShouldNotReachHere();
7283 }
7284 } else {
7285 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
7286 Address to_addr = Address(sp, st_off);
7287 if (from->is_FloatRegister()) {
7288 if (bt == T_DOUBLE) {
7289 strd(from->as_FloatRegister(), to_addr);
7290 } else {
7291 assert(bt == T_FLOAT, "must be float");
7292 strs(from->as_FloatRegister(), to_addr);
7293 }
7294 } else {
7295 str(from->as_Register(), to_addr);
7296 }
7297 }
7298 } else {
7299 Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size);
7300 if (to->is_reg()) {
7301 if (to->is_FloatRegister()) {
7302 if (bt == T_DOUBLE) {
7303 ldrd(to->as_FloatRegister(), from_addr);
7304 } else {
7305 assert(bt == T_FLOAT, "must be float");
7306 ldrs(to->as_FloatRegister(), from_addr);
7307 }
7308 } else {
7309 ldr(to->as_Register(), from_addr);
7310 }
7311 } else {
7312 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
7313 ldr(rscratch1, from_addr);
7314 str(rscratch1, Address(sp, st_off));
7315 }
7316 }
7317 }
7318
7319 // Update register states
7320 reg_state[from->value()] = reg_writable;
7321 reg_state[to->value()] = reg_written;
7322 return true;
7323 }
7324
7325 // Calculate the extra stack space required for packing or unpacking inline
7326 // args and adjust the stack pointer
7327 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
7328 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
7329 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
7330 assert(sp_inc > 0, "sanity");
7331
7332 // Save a copy of the FP and LR here for deoptimization patching and frame walking
7333 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7334
7335 // Adjust the stack pointer. This will be repaired on return by MacroAssembler::remove_frame
7336 if (sp_inc < (1 << 9)) {
7337 sub(sp, sp, sp_inc); // Fits in an immediate
7338 } else {
7339 mov(rscratch1, sp_inc);
7340 sub(sp, sp, rscratch1);
7341 }
7342
7343 return sp_inc + 2 * wordSize; // Account for the FP/LR space
7344 }
7345
7346 // Read all fields from an inline type oop and store the values in registers/stack slots
7347 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
7348 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
7349 RegState reg_state[]) {
7350 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
7351 assert(from->is_valid(), "source must be valid");
7352 bool progress = false;
7353 #ifdef ASSERT
7354 const int start_offset = offset();
7355 #endif
7356
7357 Label L_null, L_notNull;
7358 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
7359 Register tmp1 = r10;
7360 Register tmp2 = r11;
7361
#ifdef ASSERT
7363 RegSet clobbered_gp_regs = MacroAssembler::call_clobbered_gp_registers();
7364 assert(clobbered_gp_regs.contains(tmp1), "tmp1 must be saved explicitly if it's not a clobber");
7365 assert(clobbered_gp_regs.contains(tmp2), "tmp2 must be saved explicitly if it's not a clobber");
7366 assert(clobbered_gp_regs.contains(r14), "r14 must be saved explicitly if it's not a clobber");
7367 #endif
7368
7369 Register fromReg = noreg;
7370 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, true);
7371 bool done = true;
7372 bool mark_done = true;
7373 VMReg toReg;
7374 BasicType bt;
7375 // Check if argument requires a null check
7376 bool null_check = false;
7377 VMReg nullCheckReg;
7378 while (stream.next(nullCheckReg, bt)) {
7379 if (sig->at(stream.sig_index())._offset == -1) {
7380 null_check = true;
7381 break;
7382 }
7383 }
7384 stream.reset(sig_index, to_index);
7385 while (stream.next(toReg, bt)) {
7386 assert(toReg->is_valid(), "destination must be valid");
7387 int idx = (int)toReg->value();
7388 if (reg_state[idx] == reg_readonly) {
7389 if (idx != from->value()) {
7390 mark_done = false;
7391 }
7392 done = false;
7393 continue;
7394 } else if (reg_state[idx] == reg_written) {
7395 continue;
7396 }
7397 assert(reg_state[idx] == reg_writable, "must be writable");
7398 reg_state[idx] = reg_written;
7399 progress = true;
7400
7401 if (fromReg == noreg) {
7402 if (from->is_reg()) {
7403 fromReg = from->as_Register();
7404 } else {
7405 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
7406 ldr(tmp1, Address(sp, st_off));
7407 fromReg = tmp1;
7408 }
7409 if (null_check) {
7410 // Nullable inline type argument, emit null check
7411 cbz(fromReg, L_null);
7412 }
7413 }
7414 int off = sig->at(stream.sig_index())._offset;
7415 if (off == -1) {
      assert(null_check, "missing null check");
7417 if (toReg->is_stack()) {
7418 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7419 mov(tmp2, 1);
7420 str(tmp2, Address(sp, st_off));
7421 } else {
7422 mov(toReg->as_Register(), 1);
7423 }
7424 continue;
7425 }
7426 if (sig->at(stream.sig_index())._vt_oop) {
7427 if (toReg->is_stack()) {
7428 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7429 str(fromReg, Address(sp, st_off));
7430 } else {
7431 mov(toReg->as_Register(), fromReg);
7432 }
7433 continue;
7434 }
7435 assert(off > 0, "offset in object should be positive");
7436 Address fromAddr = Address(fromReg, off);
7437 if (!toReg->is_FloatRegister()) {
7438 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
7439 if (is_reference_type(bt)) {
7440 load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
7441 } else {
7442 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
7443 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
7444 }
7445 if (toReg->is_stack()) {
7446 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7447 str(dst, Address(sp, st_off));
7448 }
7449 } else if (bt == T_DOUBLE) {
7450 ldrd(toReg->as_FloatRegister(), fromAddr);
7451 } else {
7452 assert(bt == T_FLOAT, "must be float");
7453 ldrs(toReg->as_FloatRegister(), fromAddr);
7454 }
7455 }
7456 if (progress && null_check) {
7457 if (done) {
7458 b(L_notNull);
7459 bind(L_null);
7460 // Set null marker to zero to signal that the argument is null.
7461 // Also set all fields to zero since the runtime requires a canonical
7462 // representation of a flat null.
7463 stream.reset(sig_index, to_index);
7464 while (stream.next(toReg, bt)) {
7465 if (toReg->is_stack()) {
7466 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7467 str(zr, Address(sp, st_off));
7468 } else if (toReg->is_FloatRegister()) {
7469 mov(toReg->as_FloatRegister(), T2S, 0);
7470 } else {
7471 mov(toReg->as_Register(), zr);
7472 }
7473 }
7474 bind(L_notNull);
7475 } else {
7476 bind(L_null);
7477 }
7478 }
7479
7480 sig_index = stream.sig_index();
7481 to_index = stream.regs_index();
7482
7483 if (mark_done && reg_state[from->value()] != reg_written) {
7484 // This is okay because no one else will write to that slot
7485 reg_state[from->value()] = reg_writable;
7486 }
7487 from_index--;
7488 assert(progress || (start_offset == offset()), "should not emit code");
7489 return done;
7490 }
7491
7492 // Pack fields back into an inline type oop
7493 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
7494 VMRegPair* from, int from_count, int& from_index, VMReg to,
7495 RegState reg_state[], Register val_array) {
7496 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
7497 assert(to->is_valid(), "destination must be valid");
7498
7499 if (reg_state[to->value()] == reg_written) {
7500 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7501 return true; // Already written
7502 }
7503
7504 // The GC barrier expanded by store_heap_oop below may call into the
7505 // runtime so use callee-saved registers for any values that need to be
7506 // preserved. The GC barrier assembler should take care of saving the
7507 // Java argument registers.
7508 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
7509 Register val_obj_tmp = r21;
7510 Register from_reg_tmp = r22;
7511 Register tmp1 = r14;
7512 Register tmp2 = r13;
7513 Register tmp3 = r12;
7514 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
7515
7516 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
7517
7518 if (reg_state[to->value()] == reg_readonly) {
7519 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
7520 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7521 return false; // Not yet writable
7522 }
7523 val_obj = val_obj_tmp;
7524 }
7525
7526 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
7527 VMReg fromReg;
7528 BasicType bt;
7529 Label L_null;
7530 while (stream.next(fromReg, bt)) {
7531 assert(fromReg->is_valid(), "source must be valid");
7532 reg_state[fromReg->value()] = reg_writable;
7533
7534 int off = sig->at(stream.sig_index())._offset;
7535 if (off == -1) {
7536 // Nullable inline type argument, emit null check
7537 Label L_notNull;
7538 if (fromReg->is_stack()) {
7539 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7540 ldrb(tmp2, Address(sp, ld_off));
7541 cbnz(tmp2, L_notNull);
7542 } else {
7543 cbnz(fromReg->as_Register(), L_notNull);
7544 }
7545 mov(val_obj, 0);
7546 b(L_null);
7547 bind(L_notNull);
7548 continue;
7549 }
7550 if (sig->at(stream.sig_index())._vt_oop) {
7551 if (fromReg->is_stack()) {
7552 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7553 ldr(val_obj, Address(sp, ld_off));
7554 } else {
7555 mov(val_obj, fromReg->as_Register());
7556 }
7557 cbnz(val_obj, L_null);
7558 // get the buffer from the just allocated pool of buffers
7559 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
7560 load_heap_oop(val_obj, Address(val_array, index), rscratch1, rscratch2);
7561 continue;
7562 }
7563
7564 assert(off > 0, "offset in object should be positive");
7565 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
7566
7567 // Pack the scalarized field into the value object.
7568 Address dst(val_obj, off);
7569 if (!fromReg->is_FloatRegister()) {
7570 Register src;
7571 if (fromReg->is_stack()) {
7572 src = from_reg_tmp;
7573 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7574 load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
7575 } else {
7576 src = fromReg->as_Register();
7577 }
7578 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
7579 if (is_reference_type(bt)) {
7580 // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep val_obj valid.
7581 mov(tmp3, val_obj);
7582 Address dst_with_tmp3(tmp3, off);
7583 store_heap_oop(dst_with_tmp3, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
7584 } else {
7585 store_sized_value(dst, src, size_in_bytes);
7586 }
7587 } else if (bt == T_DOUBLE) {
7588 strd(fromReg->as_FloatRegister(), dst);
7589 } else {
7590 assert(bt == T_FLOAT, "must be float");
7591 strs(fromReg->as_FloatRegister(), dst);
7592 }
7593 }
7594 bind(L_null);
7595 sig_index = stream.sig_index();
7596 from_index = stream.regs_index();
7597
7598 assert(reg_state[to->value()] == reg_writable, "must have already been read");
7599 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
  assert(success, "to register must be writable");
7601 return true;
7602 }
7603
7604 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
7605 return (reg->is_FloatRegister()) ? v8->as_VMReg() : r14->as_VMReg();
7606 }
7607
7608 void MacroAssembler::cache_wb(Address line) {
7609 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
7610 assert(line.index() == noreg, "index should be noreg");
7611 assert(line.offset() == 0, "offset should be 0");
7612 // would like to assert this
7613 // assert(line._ext.shift == 0, "shift should be zero");
7614 if (VM_Version::supports_dcpop()) {
7615 // writeback using clear virtual address to point of persistence
7616 dc(Assembler::CVAP, line.base());
7617 } else {
7618 // no need to generate anything as Unsafe.writebackMemory should
7619 // never invoke this stub
7620 }
7621 }
7622
7623 void MacroAssembler::cache_wbsync(bool is_pre) {
7624 // we only need a barrier post sync
7625 if (!is_pre) {
7626 membar(Assembler::AnyAny);
7627 }
7628 }
7629
7630 void MacroAssembler::verify_sve_vector_length(Register tmp) {
7631 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) {
7632 return;
7633 }
7634 // Make sure that native code does not change SVE vector length.
7635 Label verify_ok;
7636 movw(tmp, zr);
7637 sve_inc(tmp, B);
7638 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
7639 br(EQ, verify_ok);
7640 stop("Error: SVE vector length has changed since jvm startup");
7641 bind(verify_ok);
7642 }
7643
7644 void MacroAssembler::verify_ptrue() {
7645 Label verify_ok;
7646 if (!UseSVE) {
7647 return;
7648 }
7649 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
7650 sve_dec(rscratch1, B);
7651 cbz(rscratch1, verify_ok);
7652 stop("Error: the preserved predicate register (p7) elements are not all true");
7653 bind(verify_ok);
7654 }
7655
7656 void MacroAssembler::safepoint_isb() {
7657 isb();
7658 #ifndef PRODUCT
7659 if (VerifyCrossModifyFence) {
7660 // Clear the thread state.
7661 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
7662 }
7663 #endif
7664 }
7665
7666 #ifndef PRODUCT
7667 void MacroAssembler::verify_cross_modify_fence_not_required() {
7668 if (VerifyCrossModifyFence) {
7669 // Check if thread needs a cross modify fence.
7670 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
7671 Label fence_not_required;
7672 cbz(rscratch1, fence_not_required);
7673 // If it does then fail.
7674 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
7675 mov(c_rarg0, rthread);
7676 blr(rscratch1);
7677 bind(fence_not_required);
7678 }
7679 }
7680 #endif
7681
7682 void MacroAssembler::spin_wait() {
7683 block_comment("spin_wait {");
7684 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
7685 switch (VM_Version::spin_wait_desc().inst()) {
7686 case SpinWait::NOP:
7687 nop();
7688 break;
7689 case SpinWait::ISB:
7690 isb();
7691 break;
7692 case SpinWait::YIELD:
7693 yield();
7694 break;
7695 case SpinWait::SB:
7696 assert(VM_Version::supports_sb(), "current CPU does not support SB instruction");
7697 sb();
7698 break;
7699 case SpinWait::WFET:
7700 spin_wait_wfet(VM_Version::spin_wait_desc().delay());
7701 break;
7702 default:
7703 ShouldNotReachHere();
7704 }
7705 }
7706 block_comment("}");
7707 }
7708
7709 void MacroAssembler::spin_wait_wfet(int delay_ns) {
  // The sequence assumes CNTFRQ_EL0 is fixed at 1GHz. The assumption is valid
  // starting from Armv8.6, according to section D12.1.2 "The system counter" of
  // the Arm Architecture Reference Manual for A-profile architecture, version M.a.a.
  // This is sufficient because FEAT_WFxT (which provides WFET) is introduced in Armv8.7.
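  // At 1GHz one counter tick is one nanosecond, so delay_ns can be added
  // to the counter value directly, with no scaling needed.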
7714 Register target = rscratch1;
7715 Register current = rscratch2;
7716 get_cntvctss_el0(current);
7717 add(target, current, delay_ns);
7718
7719 Label L_wait_loop;
7720 bind(L_wait_loop);
7721
7722 wfet(target);
7723 get_cntvctss_el0(current);
7724
7725 cmp(current, target);
7726 br(LT, L_wait_loop);
7727
7728 sb();
7729 }
7730
7731 // Stack frame creation/removal
7732
7733 void MacroAssembler::enter(bool strip_ret_addr) {
7734 if (strip_ret_addr) {
7735 // Addresses can only be signed once. If there are multiple nested frames being created
7736 // in the same function, then the return address needs stripping first.
7737 strip_return_address();
7738 }
7739 protect_return_address();
7740 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7741 mov(rfp, sp);
7742 }
7743
7744 void MacroAssembler::leave() {
7745 mov(sp, rfp);
7746 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
7747 authenticate_return_address();
7748 }
7749
7750 // ROP Protection
7751 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
7752 // destroying stack frames or whenever directly loading/storing the LR to memory.
7753 // If ROP protection is not set then these functions are no-ops.
7754 // For more details on PAC see pauth_aarch64.hpp.
7755
7756 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
7757 // Uses value zero as the modifier.
7758 //
7759 void MacroAssembler::protect_return_address() {
7760 if (VM_Version::use_rop_protection()) {
7761 check_return_address();
7762 paciaz();
7763 }
7764 }
7765
// Sign the return address in the given register. Use before updating the LR in the existing stack
7767 // frame for the current function.
7768 // Uses value zero as the modifier.
7769 //
7770 void MacroAssembler::protect_return_address(Register return_reg) {
7771 if (VM_Version::use_rop_protection()) {
7772 check_return_address(return_reg);
7773 paciza(return_reg);
7774 }
7775 }
7776
7777 // Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
7778 // Uses value zero as the modifier.
7779 //
7780 void MacroAssembler::authenticate_return_address() {
7781 if (VM_Version::use_rop_protection()) {
7782 autiaz();
7783 check_return_address();
7784 }
7785 }
7786
// Authenticate the return address in the given register. Use before updating the LR in the existing
7788 // stack frame for the current function.
7789 // Uses value zero as the modifier.
7790 //
7791 void MacroAssembler::authenticate_return_address(Register return_reg) {
7792 if (VM_Version::use_rop_protection()) {
7793 autiza(return_reg);
7794 check_return_address(return_reg);
7795 }
7796 }
7797
7798 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
7799 // there is no guaranteed way of authenticating the LR.
7800 //
7801 void MacroAssembler::strip_return_address() {
7802 if (VM_Version::use_rop_protection()) {
7803 xpaclri();
7804 }
7805 }
7806
7807 #ifndef PRODUCT
7808 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
7809 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point
7810 // it is difficult to debug back to the callee function.
7811 // This function simply loads from the address in the given register.
7812 // Use directly after authentication to catch authentication failures.
7813 // Also use before signing to check that the pointer is valid and hasn't already been signed.
7814 //
7815 void MacroAssembler::check_return_address(Register return_reg) {
7816 if (VM_Version::use_rop_protection()) {
7817 ldr(zr, Address(return_reg));
7818 }
7819 }
7820 #endif
7821
7822 // The java_calling_convention describes stack locations as ideal slots on
7823 // a frame with no abi restrictions. Since we must observe abi restrictions
7824 // (like the placement of the register window) the slots must be biased by
7825 // the following value.
7826 static int reg2offset_in(VMReg r) {
7827 // Account for saved rfp and lr
7828 // This should really be in_preserve_stack_slots
7829 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
7830 }
7831
7832 static int reg2offset_out(VMReg r) {
7833 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
7834 }
7835
// On 64-bit we will store integer-like items to the stack as 64-bit
// items (AArch64 ABI), even though Java would only store 32 bits for
// a parameter. On 32-bit it would simply be 32 bits, so this routine
// does 32->32 on 32-bit and 32->64 on 64-bit.
7840 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
7841 if (src.first()->is_stack()) {
7842 if (dst.first()->is_stack()) {
7843 // stack to stack
7844 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7845 str(tmp, Address(sp, reg2offset_out(dst.first())));
7846 } else {
7847 // stack to reg
7848 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7849 }
7850 } else if (dst.first()->is_stack()) {
7851 // reg to stack
7852 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7853 } else {
7854 if (dst.first() != src.first()) {
7855 sxtw(dst.first()->as_Register(), src.first()->as_Register());
7856 }
7857 }
7858 }
7859
// An oop arg. We must pass a handle, not the oop itself.
7861 void MacroAssembler::object_move(
7862 OopMap* map,
7863 int oop_handle_offset,
7864 int framesize_in_slots,
7865 VMRegPair src,
7866 VMRegPair dst,
7867 bool is_receiver,
7868 int* receiver_offset) {
7869
7870 // must pass a handle. First figure out the location we use as a handle
7871
7872 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
7873
  // See if the oop is null; if it is, we need no handle.
7875
7876 if (src.first()->is_stack()) {
7877
7878 // Oop is already on the stack as an argument
7879 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
7880 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
7881 if (is_receiver) {
7882 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
7883 }
7884
7885 ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
7886 lea(rHandle, Address(rfp, reg2offset_in(src.first())));
7887 // conditionally move a null
7888 cmp(rscratch1, zr);
7889 csel(rHandle, zr, rHandle, Assembler::EQ);
7890 } else {
7891
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop handles and pass a handle if the oop is non-null.
7894
7895 const Register rOop = src.first()->as_Register();
7896 int oop_slot;
7897 if (rOop == j_rarg0)
7898 oop_slot = 0;
7899 else if (rOop == j_rarg1)
7900 oop_slot = 1;
7901 else if (rOop == j_rarg2)
7902 oop_slot = 2;
7903 else if (rOop == j_rarg3)
7904 oop_slot = 3;
7905 else if (rOop == j_rarg4)
7906 oop_slot = 4;
7907 else if (rOop == j_rarg5)
7908 oop_slot = 5;
7909 else if (rOop == j_rarg6)
7910 oop_slot = 6;
7911 else {
7912 assert(rOop == j_rarg7, "wrong register");
7913 oop_slot = 7;
7914 }
7915
7916 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
7917 int offset = oop_slot*VMRegImpl::stack_slot_size;
7918
7919 map->set_oop(VMRegImpl::stack2reg(oop_slot));
7920 // Store oop in handle area, may be null
7921 str(rOop, Address(sp, offset));
7922 if (is_receiver) {
7923 *receiver_offset = offset;
7924 }
7925
7926 cmp(rOop, zr);
7927 lea(rHandle, Address(sp, offset));
7928 // conditionally move a null
7929 csel(rHandle, zr, rHandle, Assembler::EQ);
7930 }
7931
  // If the arg is on the stack then place it; otherwise it is already in the correct reg.
7933 if (dst.first()->is_stack()) {
7934 str(rHandle, Address(sp, reg2offset_out(dst.first())));
7935 }
7936 }
7937
// A float arg may have to move between a float register and the stack.
7939 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
7940 if (src.first()->is_stack()) {
7941 if (dst.first()->is_stack()) {
7942 ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
7943 strw(tmp, Address(sp, reg2offset_out(dst.first())));
7944 } else {
7945 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7946 }
7947 } else if (src.first() != dst.first()) {
7948 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7949 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7950 else
7951 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7952 }
7953 }
7954
7955 // A long move
7956 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
7957 if (src.first()->is_stack()) {
7958 if (dst.first()->is_stack()) {
7959 // stack to stack
7960 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7961 str(tmp, Address(sp, reg2offset_out(dst.first())));
7962 } else {
7963 // stack to reg
7964 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7965 }
7966 } else if (dst.first()->is_stack()) {
7967 // reg to stack
7968 // Do we really have to sign extend???
7969 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
7970 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7971 } else {
7972 if (dst.first() != src.first()) {
7973 mov(dst.first()->as_Register(), src.first()->as_Register());
7974 }
7975 }
7976 }
7977
7978
7979 // A double move
7980 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
7981 if (src.first()->is_stack()) {
7982 if (dst.first()->is_stack()) {
7983 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7984 str(tmp, Address(sp, reg2offset_out(dst.first())));
7985 } else {
7986 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7987 }
7988 } else if (src.first() != dst.first()) {
7989 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7990 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7991 else
7992 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7993 }
7994 }
7995
7996 // Implements fast-locking.
7997 //
7998 // - obj: the object to be locked
7999 // - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
8001 void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
8002 assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
8003
8004 Label push;
8005 const Register top = t1;
8006 const Register mark = t2;
8007 const Register t = t3;
8008
8009 // Preload the markWord. It is important that this is the first
8010 // instruction emitted as it is part of C1's null check semantics.
8011 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
8012
8013 if (UseObjectMonitorTable) {
8014 // Clear cache in case fast locking succeeds or we need to take the slow-path.
8015 str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
8016 }
8017
8018 if (DiagnoseSyncOnValueBasedClasses != 0) {
8019 load_klass(t1, obj);
8020 ldrb(t1, Address(t1, Klass::misc_flags_offset()));
8021 tst(t1, KlassFlags::_misc_is_value_based_class);
8022 br(Assembler::NE, slow);
8023 }
8024
8025 // Check if the lock-stack is full.
8026 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
8027 cmpw(top, (unsigned)LockStack::end_offset());
8028 br(Assembler::GE, slow);
8029
8030 // Check for recursion.
8031 subw(t, top, oopSize);
8032 ldr(t, Address(rthread, t));
8033 cmp(obj, t);
8034 br(Assembler::EQ, push);
8035
8036 // Check header for monitor (0b10).
8037 tst(mark, markWord::monitor_value);
8038 br(Assembler::NE, slow);
8039
8040 // Try to lock. Transition lock bits 0b01 => 0b00
8041 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
8042 orr(mark, mark, markWord::unlocked_value);
8043 // Mask inline_type bit such that we go to the slow path if object is an inline type
8044 andr(mark, mark, ~((int) markWord::inline_type_bit_in_place));
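  // mark now holds the expected header: bit 0 forced to 1 (unlocked)
  // and the inline-type bit forced to 0. The cmpxchg below therefore
  // fails, sending us to the slow path, if the object is an inline
  // type, is already fast-locked, or its header changed concurrently.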
8045
8046 eor(t, mark, markWord::unlocked_value);
8047 cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
8048 /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
8049 br(Assembler::NE, slow);
8050
8051 bind(push);
8052 // After successful lock, push object on lock-stack.
8053 str(obj, Address(rthread, top));
8054 addw(top, top, oopSize);
8055 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
8056 }
8057
8058 // Implements fast-unlocking.
8059 //
8060 // - obj: the object to be unlocked
8061 // - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
8063 void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
8064 // cmpxchg clobbers rscratch1.
8065 assert_different_registers(obj, t1, t2, t3, rscratch1);
8066
8067 #ifdef ASSERT
8068 {
8069 // Check for lock-stack underflow.
8070 Label stack_ok;
8071 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
8072 cmpw(t1, (unsigned)LockStack::start_offset());
8073 br(Assembler::GE, stack_ok);
8074 STOP("Lock-stack underflow");
8075 bind(stack_ok);
8076 }
8077 #endif
8078
8079 Label unlocked, push_and_slow;
8080 const Register top = t1;
8081 const Register mark = t2;
8082 const Register t = t3;
8083
8084 // Check if obj is top of lock-stack.
8085 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
8086 subw(top, top, oopSize);
8087 ldr(t, Address(rthread, top));
8088 cmp(obj, t);
8089 br(Assembler::NE, slow);
8090
8091 // Pop lock-stack.
8092 DEBUG_ONLY(str(zr, Address(rthread, top));)
8093 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
8094
8095 // Check if recursive.
8096 subw(t, top, oopSize);
8097 ldr(t, Address(rthread, t));
8098 cmp(obj, t);
8099 br(Assembler::EQ, unlocked);
8100
8101 // Not recursive. Check header for monitor (0b10).
8102 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
8103 tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
8104
8105 #ifdef ASSERT
8106 // Check header not unlocked (0b01).
8107 Label not_unlocked;
8108 tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
8109 stop("fast_unlock already unlocked");
8110 bind(not_unlocked);
8111 #endif
8112
8113 // Try to unlock. Transition lock bits 0b00 => 0b01
8114 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
8115 orr(t, mark, markWord::unlocked_value);
8116 cmpxchg(obj, mark, t, Assembler::xword,
8117 /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
8118 br(Assembler::EQ, unlocked);
8119
8120 bind(push_and_slow);
8121 // Restore lock-stack and handle the unlock in runtime.
8122 DEBUG_ONLY(str(obj, Address(rthread, top));)
8123 addw(top, top, oopSize);
8124 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
8125 b(slow);
8126
8127 bind(unlocked);
8128 }