1 /*
2 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
4 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/codeCache.hpp"
32 #include "code/compiledIC.hpp"
33 #include "code/debugInfoRec.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/barrierSetAssembler.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "interpreter/interp_masm.hpp"
39 #include "logging/log.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "nativeInst_aarch64.hpp"
42 #include "oops/klass.inline.hpp"
43 #include "oops/method.inline.hpp"
44 #include "prims/methodHandles.hpp"
45 #include "runtime/continuation.hpp"
46 #include "runtime/continuationEntry.inline.hpp"
47 #include "runtime/globals.hpp"
48 #include "runtime/jniHandles.hpp"
49 #include "runtime/safepointMechanism.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "runtime/signature.hpp"
52 #include "runtime/stubRoutines.hpp"
53 #include "runtime/timerTrace.hpp"
54 #include "runtime/vframeArray.hpp"
55 #include "utilities/align.hpp"
56 #include "utilities/formatBuffer.hpp"
57 #include "vmreg_aarch64.inline.hpp"
58 #ifdef COMPILER1
59 #include "c1/c1_Runtime1.hpp"
60 #endif
61 #ifdef COMPILER2
62 #include "adfiles/ad_aarch64.hpp"
63 #include "opto/runtime.hpp"
64 #endif
65 #if INCLUDE_JVMCI
66 #include "jvmci/jvmciJavaClasses.hpp"
67 #endif
68
69 #define __ masm->
70
71 #ifdef PRODUCT
72 #define BLOCK_COMMENT(str) /* nothing */
73 #else
74 #define BLOCK_COMMENT(str) __ block_comment(str)
75 #endif
76
77 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
78
79 // FIXME -- this is used by C1
80 class RegisterSaver {
81 const bool _save_vectors;
82 public:
83 RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}
84
85 OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
86 void restore_live_registers(MacroAssembler* masm);
87
88 // Offsets into the register save area
89 // Used by deoptimization when it is managing result register
90 // values on its own
91
92 int reg_offset_in_bytes(Register r);
93 int r0_offset_in_bytes() { return reg_offset_in_bytes(r0); }
94 int rscratch1_offset_in_bytes() { return reg_offset_in_bytes(rscratch1); }
95 int v0_offset_in_bytes();
96
97 // Total stack size in bytes for saving sve predicate registers.
98 int total_sve_predicate_in_bytes();
99
100 // Capture info about frame layout
101 // Note this is only correct when not saving full vectors.
102 enum layout {
103 fpu_state_off = 0,
104 fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
105 // The frame sender code expects that rfp will be in
106 // the "natural" place and will override any oopMap
107 // setting for it. We must therefore force the layout
108 // so that it agrees with the frame sender code.
109 r0_off = fpu_state_off + FPUStateSizeInWords,
110 rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
111 return_off = rfp_off + Register::max_slots_per_register, // slot for return address
112 reg_save_size = return_off + Register::max_slots_per_register};
113
114 };
115
116 int RegisterSaver::reg_offset_in_bytes(Register r) {
117 // The integer registers are located above the floating point
118 // registers in the stack frame pushed by save_live_registers() so the
119 // offset depends on whether we are saving full vectors, and whether
120 // those vectors are NEON or SVE.
121
122 int slots_per_vect = FloatRegister::save_slots_per_register;
123
124 #if COMPILER2_OR_JVMCI
125 if (_save_vectors) {
126 slots_per_vect = FloatRegister::slots_per_neon_register;
127
128 #ifdef COMPILER2
129 if (Matcher::supports_scalable_vector()) {
130 slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
131 }
132 #endif
133 }
134 #endif
135
136 int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
137 return r0_offset + r->encoding() * wordSize;
138 }
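// Illustrative arithmetic (an assumption-laden sketch, not generated code):
// with no vectors saved and the default of 2 save slots (8 bytes) per FP
// register, r0_offset = v0_offset_in_bytes() + 2 * 32 * 4 bytes, and each
// integer register is then found at r->encoding() * wordSize above that.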
139
140 int RegisterSaver::v0_offset_in_bytes() {
141 // The floating point registers are located above the predicate registers, if
142 // any are present in the stack frame pushed by save_live_registers(). So the
143 // offset depends on the total size of the predicate registers saved in the stack frame.
144 return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
145 }
146
147 int RegisterSaver::total_sve_predicate_in_bytes() {
148 #ifdef COMPILER2
149 if (_save_vectors && Matcher::supports_scalable_vector()) {
150 return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
151 PRegister::number_of_registers;
152 }
153 #endif
154 return 0;
155 }
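// Illustrative: with a 256-bit SVE vector length, scalable_vector_reg_size(T_BYTE)
// is 32, so each predicate register occupies 32 >> LogBitsPerByte = 4 bytes and
// the total is 4 * PRegister::number_of_registers bytes.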
156
157 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
158 bool use_sve = false;
159 int sve_vector_size_in_bytes = 0;
160 int sve_vector_size_in_slots = 0;
161 int sve_predicate_size_in_slots = 0;
162 int total_predicate_in_bytes = total_sve_predicate_in_bytes();
163 int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;
164
165 #ifdef COMPILER2
166 use_sve = Matcher::supports_scalable_vector();
167 if (use_sve) {
168 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
169 sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
170 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
171 }
172 #endif
173
174 #if COMPILER2_OR_JVMCI
175 if (_save_vectors) {
176 int extra_save_slots_per_register = 0;
177 // Save upper half of vector registers
178 if (use_sve) {
179 extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
180 } else {
181 extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
182 }
183 int extra_vector_bytes = extra_save_slots_per_register *
184 VMRegImpl::stack_slot_size *
185 FloatRegister::number_of_registers;
186 additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
187 }
188 #else
189 assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
190 #endif
191
192 int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
193 reg_save_size * BytesPerInt, 16);
194 // OopMap frame size is in compiler stack slots (jint's) not bytes or words
195 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
196 // The caller will allocate additional_frame_words
197 int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
198 // CodeBlob frame size is in words.
199 int frame_size_in_words = frame_size_in_bytes / wordSize;
200 *total_frame_words = frame_size_in_words;
201
202 // Save Integer and Float registers.
203 __ enter();
204 __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
205
206 // Set an oopmap for the call site. This oopmap will map all
207 // oop-registers and debug-info registers as callee-saved. This
208 // will allow deoptimization at this safepoint to find all possible
209 // debug-info recordings, as well as let GC find all oops.
210
211 OopMapSet *oop_maps = new OopMapSet();
212 OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
213
214 for (int i = 0; i < Register::number_of_registers; i++) {
215 Register r = as_Register(i);
216 if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
217 // SP offsets are in 4-byte stack slots.
218 // Integer register save slots are 8 bytes wide and sit above the save area for the 32 floating-point registers.
219 int sp_offset = Register::max_slots_per_register * i +
220 FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
221 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
222 }
223 }
224
225 for (int i = 0; i < FloatRegister::number_of_registers; i++) {
226 FloatRegister r = as_FloatRegister(i);
227 int sp_offset = 0;
228 if (_save_vectors) {
229 sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
230 (FloatRegister::slots_per_neon_register * i);
231 } else {
232 sp_offset = FloatRegister::save_slots_per_register * i;
233 }
234 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
235 }
236
237 return oop_map;
238 }
239
240 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
241 #ifdef COMPILER2
242 __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
243 Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
244 #else
245 #if !INCLUDE_JVMCI
246 assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
247 #endif
248 __ pop_CPU_state(_save_vectors);
249 #endif
250 __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
251 __ authenticate_return_address();
252 }
253
254 // Is the vector's size (in bytes) bigger than the size saved by default?
255 // 8-byte vector registers are saved by default on AArch64.
256 // The minimum vector size supported by SVE is 8 bytes, and we need to save
257 // the predicate registers when the vector size is 8 bytes as well.
258 bool SharedRuntime::is_wide_vector(int size) {
259 return size > 8 || (UseSVE > 0 && size >= 8);
260 }
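// Illustrative: a 16-byte NEON vector is always considered wide; an 8-byte
// vector counts as wide only when UseSVE > 0, because the predicate registers
// must then be saved as well.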
261
262 // ---------------------------------------------------------------------------
263 // Read the array of BasicTypes from a signature, and compute where the
264 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
265 // quantities. Values less than VMRegImpl::stack0 are registers, those above
266 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
267 // as framesizes are fixed.
268 // VMRegImpl::stack0 refers to the first slot at 0(sp),
269 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
270 // Registers up to Register::number_of_registers are the 64-bit
271 // integer registers.
272
273 // Note: the INPUTS in sig_bt are in units of Java argument words,
274 // which are 64-bit. The OUTPUTS are in 32-bit units.
275
276 // The Java calling convention is a "shifted" version of the C ABI.
277 // By skipping the first C ABI register we can call non-static JNI
278 // methods with small numbers of arguments without having to shuffle
279 // the arguments at all. Since we control the Java ABI we ought to at
280 // least get some advantage out of it.
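//
// Illustrative sketch (relies on the usual AArch64 aliasing of the Java and C
// argument registers, i.e. j_rargN == c_rarg(N+1) and j_rarg7 == c_rarg0): for
// a non-static native method foo(int x), the receiver arrives in j_rarg0 and x
// in j_rarg1, which are already c_rarg1 and c_rarg2 once the JNIEnv* is placed
// in c_rarg0, so no argument shuffling is needed for the native call.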
281
282 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
283 VMRegPair *regs,
284 int total_args_passed) {
285
286 // Create the mapping between argument positions and
287 // registers.
288 static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
289 j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
290 };
291 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
292 j_farg0, j_farg1, j_farg2, j_farg3,
293 j_farg4, j_farg5, j_farg6, j_farg7
294 };
295
296
297 uint int_args = 0;
298 uint fp_args = 0;
299 uint stk_args = 0;
300
301 for (int i = 0; i < total_args_passed; i++) {
302 switch (sig_bt[i]) {
303 case T_BOOLEAN:
304 case T_CHAR:
305 case T_BYTE:
306 case T_SHORT:
307 case T_INT:
308 if (int_args < Argument::n_int_register_parameters_j) {
309 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
310 } else {
311 stk_args = align_up(stk_args, 2);
312 regs[i].set1(VMRegImpl::stack2reg(stk_args));
313 stk_args += 1;
314 }
315 break;
316 case T_VOID:
317 // halves of T_LONG or T_DOUBLE
318 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
319 regs[i].set_bad();
320 break;
321 case T_LONG:
322 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
323 // fall through
324 case T_OBJECT:
325 case T_ARRAY:
326 case T_ADDRESS:
327 if (int_args < Argument::n_int_register_parameters_j) {
328 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
329 } else {
330 stk_args = align_up(stk_args, 2);
331 regs[i].set2(VMRegImpl::stack2reg(stk_args));
332 stk_args += 2;
333 }
334 break;
335 case T_FLOAT:
336 if (fp_args < Argument::n_float_register_parameters_j) {
337 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
338 } else {
339 stk_args = align_up(stk_args, 2);
340 regs[i].set1(VMRegImpl::stack2reg(stk_args));
341 stk_args += 1;
342 }
343 break;
344 case T_DOUBLE:
345 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
346 if (fp_args < Argument::n_float_register_parameters_j) {
347 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
348 } else {
349 stk_args = align_up(stk_args, 2);
350 regs[i].set2(VMRegImpl::stack2reg(stk_args));
351 stk_args += 2;
352 }
353 break;
354 default:
355 ShouldNotReachHere();
356 break;
357 }
358 }
359
360 return stk_args;
361 }
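// Worked example (illustrative only): for a signature (int, long, double) with
// all registers free, the loop above assigns j_rarg0 via set1() for the int,
// j_rarg1 via set2() for the long (its trailing T_VOID half is set_bad()), and
// j_farg0 via set2() for the double; stk_args stays 0, so no outgoing stack
// slots are required.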
362
363
364 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
365 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
366
367 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
368
369 // Create the mapping between argument positions and registers.
370
371 static const Register INT_ArgReg[java_return_convention_max_int] = {
372 r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
373 };
374
375 static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
376 j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
377 };
378
379 uint int_args = 0;
380 uint fp_args = 0;
381
382 for (int i = 0; i < total_args_passed; i++) {
383 switch (sig_bt[i]) {
384 case T_BOOLEAN:
385 case T_CHAR:
386 case T_BYTE:
387 case T_SHORT:
388 case T_INT:
389 if (int_args < SharedRuntime::java_return_convention_max_int) {
390 regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
391 int_args ++;
392 } else {
393 return -1;
394 }
395 break;
396 case T_VOID:
397 // halves of T_LONG or T_DOUBLE
398 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
399 regs[i].set_bad();
400 break;
401 case T_LONG:
402 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
403 // fall through
404 case T_OBJECT:
405 case T_ARRAY:
406 case T_ADDRESS:
407 // Should T_METADATA be added to java_calling_convention as well ?
408 case T_METADATA:
409 if (int_args < SharedRuntime::java_return_convention_max_int) {
410 regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
411 int_args ++;
412 } else {
413 return -1;
414 }
415 break;
416 case T_FLOAT:
417 if (fp_args < SharedRuntime::java_return_convention_max_float) {
418 regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
419 fp_args ++;
420 } else {
421 return -1;
422 }
423 break;
424 case T_DOUBLE:
425 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
426 if (fp_args < SharedRuntime::java_return_convention_max_float) {
427 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
428 fp_args ++;
429 } else {
430 return -1;
431 }
432 break;
433 default:
434 ShouldNotReachHere();
435 break;
436 }
437 }
438
439 return int_args + fp_args;
440 }
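// Illustrative: a method returning a single int gets INT_ArgReg[0], i.e. r0, via
// set1() and the function returns 1; a single double return gets j_farg0 via
// set2() (the trailing T_VOID half is set_bad()) and the function also returns 1.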
441
442 // Patch the caller's callsite with the entry to compiled code, if it exists.
443 static void patch_callers_callsite(MacroAssembler *masm) {
444 Label L;
445 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
446 __ cbz(rscratch1, L);
447
448 __ enter();
449 __ push_CPU_state();
450
451 // VM needs caller's callsite
452 // VM needs target method
453 // This needs to be a long call since we will relocate this adapter to
454 // the codeBuffer and it may not reach
455
456 #ifndef PRODUCT
457 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
458 #endif
459
460 __ mov(c_rarg0, rmethod);
461 __ mov(c_rarg1, lr);
462 __ authenticate_return_address(c_rarg1);
463 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
464 __ blr(rscratch1);
465
466 // Explicit isb required because fixup_callers_callsite may change the code
467 // stream.
468 __ safepoint_isb();
469
470 __ pop_CPU_state();
471 // restore sp
472 __ leave();
473 __ bind(L);
474 }
475
476 // For each inline type argument, sig includes the list of fields of
477 // the inline type. This utility function computes the number of
478 // arguments for the call if inline types are passed by reference (the
479 // calling convention the interpreter expects).
480 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
481 int total_args_passed = 0;
482 if (InlineTypePassFieldsAsArgs) {
483 for (int i = 0; i < sig_extended->length(); i++) {
484 BasicType bt = sig_extended->at(i)._bt;
485 if (bt == T_METADATA) {
486 // In sig_extended, an inline type argument starts with:
487 // T_METADATA, followed by the types of the fields of the
488 // inline type and T_VOID to mark the end of the value
489 // type. Inline types are flattened so, for instance, in the
490 // case of an inline type with an int field and an inline type
491 // field that itself has 2 fields, an int and a long:
492 // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
493 // slot for the T_LONG) T_VOID (inner inline type) T_VOID
494 // (outer inline type)
495 total_args_passed++;
496 int vt = 1;
497 do {
498 i++;
499 BasicType bt = sig_extended->at(i)._bt;
500 BasicType prev_bt = sig_extended->at(i-1)._bt;
501 if (bt == T_METADATA) {
502 vt++;
503 } else if (bt == T_VOID &&
504 prev_bt != T_LONG &&
505 prev_bt != T_DOUBLE) {
506 vt--;
507 }
508 } while (vt != 0);
509 } else {
510 total_args_passed++;
511 }
512 }
513 } else {
514 total_args_passed = sig_extended->length();
515 }
516 return total_args_passed;
517 }
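// Illustrative: for the flattened example in the comment above
// (T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID T_VOID T_VOID), the scan
// consumes the whole group as one interpreter argument, so total_args_passed
// is incremented by exactly 1 for that inline type.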
518
519
520 static void gen_c2i_adapter_helper(MacroAssembler* masm,
521 BasicType bt,
522 BasicType prev_bt,
523 size_t size_in_bytes,
524 const VMRegPair& reg_pair,
525 const Address& to,
526 Register tmp1,
527 Register tmp2,
528 Register tmp3,
529 int extraspace,
530 bool is_oop) {
531 if (bt == T_VOID) {
532 assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
533 return;
534 }
535
536 // Say 4 args:
537 // i st_off
538 // 0 32 T_LONG
539 // 1 24 T_VOID
540 // 2 16 T_OBJECT
541 // 3 8 T_BOOL
542 // - 0 return address
543 //
544 // However, to make things extra confusing: because we can fit a Java long/double in
545 // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
546 // leaves one slot empty and only stores to a single slot. In this case the
547 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
548
549 bool wide = (size_in_bytes == wordSize);
550 VMReg r_1 = reg_pair.first();
551 VMReg r_2 = reg_pair.second();
552 assert(r_2->is_valid() == wide, "invalid size");
553 if (!r_1->is_valid()) {
554 assert(!r_2->is_valid(), "");
555 return;
556 }
557
558 if (!r_1->is_FloatRegister()) {
559 Register val = r25;
560 if (r_1->is_stack()) {
561 // memory-to-memory copy: use r25 (the scratch registers are used by store_heap_oop)
562 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
563 __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
564 } else {
565 val = r_1->as_Register();
566 }
567 assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
568 if (is_oop) {
569 __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
570 } else {
571 __ store_sized_value(to, val, size_in_bytes);
572 }
573 } else {
574 if (wide) {
575 __ strd(r_1->as_FloatRegister(), to);
576 } else {
577 // only a float: use just part of the slot
578 __ strs(r_1->as_FloatRegister(), to);
579 }
580 }
581 }
582
583 static void gen_c2i_adapter(MacroAssembler *masm,
584 const GrowableArray<SigEntry>* sig_extended,
585 const VMRegPair *regs,
586 bool requires_clinit_barrier,
587 address& c2i_no_clinit_check_entry,
588 Label& skip_fixup,
589 address start,
590 OopMapSet* oop_maps,
591 int& frame_complete,
592 int& frame_size_in_words,
593 bool alloc_inline_receiver) {
594 if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
595 Label L_skip_barrier;
596
597 { // Bypass the barrier for non-static methods
598 __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
599 __ andsw(zr, rscratch1, JVM_ACC_STATIC);
600 __ br(Assembler::EQ, L_skip_barrier); // non-static
601 }
602
603 __ load_method_holder(rscratch2, rmethod);
604 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
605 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
606
607 __ bind(L_skip_barrier);
608 c2i_no_clinit_check_entry = __ pc();
609 }
610
611 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
612 bs->c2i_entry_barrier(masm);
613
614 // Before we get into the guts of the C2I adapter, see if we should be here
615 // at all. We've come from compiled code and are attempting to jump to the
616 // interpreter, which means the caller made a static call to get here
617 // (vcalls always get a compiled target if there is one). Check for a
618 // compiled target. If there is one, we need to patch the caller's call.
619 patch_callers_callsite(masm);
620
621 __ bind(skip_fixup);
622
623 // TODO 8366717 Is the comment about r13 correct? Isn't that r19_sender_sp?
624 // Name some registers to be used in the following code. We can use
625 // anything except r0-r7 which are arguments in the Java calling
626 // convention, rmethod (r12), and r13 which holds the outgoing sender
627 // SP for the interpreter.
628 // TODO 8366717 We need to make sure that buf_array, buf_oop (and potentially other long-life regs) are kept live in slowpath runtime calls in GC barriers
629 Register buf_array = r10; // Array of buffered inline types
630 Register buf_oop = r11; // Buffered inline type oop
631 Register tmp1 = r15;
632 Register tmp2 = r16;
633 Register tmp3 = r17;
634
635 if (InlineTypePassFieldsAsArgs) {
636 // Is there an inline type argument?
637 bool has_inline_argument = false;
638 for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
639 has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
640 }
641 if (has_inline_argument) {
642 // There is at least one inline type argument: we're coming from
643 // compiled code, so we have no buffers to back the inline types.
644 // Allocate the buffers here with a runtime call.
645 // TODO 8366717 Do we need to save vectors here? They could be used as arg registers, right? Same on x64.
646 RegisterSaver reg_save(true /* save_vectors */);
647 OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
648
649 frame_complete = __ offset();
650 address the_pc = __ pc();
651
652 Label retaddr;
653 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
654
655 __ mov(c_rarg0, rthread);
656 __ mov(c_rarg1, rmethod);
657 __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
658
659 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
660 __ blr(rscratch1);
661 __ bind(retaddr);
662
663 oop_maps->add_gc_map(__ pc() - start, map);
664 __ reset_last_Java_frame(false);
665
666 reg_save.restore_live_registers(masm);
667
668 Label no_exception;
669 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
670 __ cbz(rscratch1, no_exception);
671
672 __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
673 __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
674 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
675
676 __ bind(no_exception);
677
678 // We get an array of objects from the runtime call
679 __ get_vm_result_oop(buf_array, rthread);
680 __ get_vm_result_metadata(rmethod, rthread); // TODO: required to keep the callee Method live?
681 }
682 }
683
684 // Since all args are passed on the stack, total_args_passed *
685 // Interpreter::stackElementSize is the space we need.
686
687 int total_args_passed = compute_total_args_passed_int(sig_extended);
688 int extraspace = total_args_passed * Interpreter::stackElementSize;
689
690 // stack is aligned, keep it that way
691 extraspace = align_up(extraspace, StackAlignmentInBytes);
692
693 // set senderSP value
694 __ mov(r19_sender_sp, sp);
695
696 __ sub(sp, sp, extraspace);
697
698 // Now write the args into the outgoing interpreter space
699
700 // next_arg_comp is the next argument from the compiler point of
701 // view (inline type fields are passed in registers/on the stack). In
702 // sig_extended, an inline type argument starts with: T_METADATA,
703 // followed by the types of the fields of the inline type and T_VOID
704 // to mark the end of the inline type. ignored counts the number of
705 // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
706 // used to get the buffer for that argument from the pool of buffers
707 // we allocated above and want to pass to the
708 // interpreter. next_arg_int is the next argument from the
709 // interpreter point of view (inline types are passed by reference).
710 for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
711 next_arg_comp < sig_extended->length(); next_arg_comp++) {
712 assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
713 assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
714 BasicType bt = sig_extended->at(next_arg_comp)._bt;
715 int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
716 if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
717 int next_off = st_off - Interpreter::stackElementSize;
718 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
719 const VMRegPair reg_pair = regs[next_arg_comp-ignored];
720 size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
721 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
722 size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
723 next_arg_int++;
724 #ifdef ASSERT
725 if (bt == T_LONG || bt == T_DOUBLE) {
726 // Overwrite the unused slot with known junk
727 __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
728 __ str(rscratch1, Address(sp, st_off));
729 }
730 #endif /* ASSERT */
731 } else {
732 ignored++;
733 // get the buffer from the just allocated pool of buffers
734 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
735 __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
736 next_vt_arg++; next_arg_int++;
737 int vt = 1;
738 // write fields we get from compiled code in registers/stack
739 // slots to the buffer: we know we are done with that inline type
740 // argument when we hit the T_VOID that acts as an end of inline
741 // type delimiter for this inline type. Inline types are flattened
742 // so we might encounter embedded inline types. Each entry in
743 // sig_extended contains a field offset in the buffer.
744 Label L_null;
745 do {
746 next_arg_comp++;
747 BasicType bt = sig_extended->at(next_arg_comp)._bt;
748 BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
749 if (bt == T_METADATA) {
750 vt++;
751 ignored++;
752 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
753 vt--;
754 ignored++;
755 } else {
756 int off = sig_extended->at(next_arg_comp)._offset;
757 if (off == -1) {
758 // Nullable inline type argument, emit null check
759 VMReg reg = regs[next_arg_comp-ignored].first();
760 Label L_notNull;
761 if (reg->is_stack()) {
762 int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
763 __ ldrb(tmp1, Address(sp, ld_off));
764 __ cbnz(tmp1, L_notNull);
765 } else {
766 __ cbnz(reg->as_Register(), L_notNull);
767 }
768 __ str(zr, Address(sp, st_off));
769 __ b(L_null);
770 __ bind(L_notNull);
771 continue;
772 }
773 assert(off > 0, "offset in object should be positive");
774 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
775 bool is_oop = is_reference_type(bt);
776 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
777 size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
778 }
779 } while (vt != 0);
780 // pass the buffer to the interpreter
781 __ str(buf_oop, Address(sp, st_off));
782 __ bind(L_null);
783 }
784 }
785
786 __ mov(esp, sp); // Interp expects args on caller's expression stack
787
788 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
789 __ br(rscratch1);
790 }
791
792 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
793
794
795 // Note: r19_sender_sp contains the senderSP on entry. We must
796 // preserve it since we may do an i2c -> c2i transition if we lose a
797 // race where compiled code goes non-entrant while we get args
798 // ready.
799
800 // Adapters are frameless.
801
802 // An i2c adapter is frameless because the *caller* frame, which is
803 // interpreted, routinely repairs its own esp (from
804 // interpreter_frame_last_sp), even if a callee has modified the
805 // stack pointer. It also recalculates and aligns sp.
806
807 // A c2i adapter is frameless because the *callee* frame, which is
808 // interpreted, routinely repairs its caller's sp (from sender_sp,
809 // which is set up via the senderSP register).
810
811 // In other words, if *either* the caller or callee is interpreted, we can
812 // get the stack pointer repaired after a call.
813
814 // This is why c2i and i2c adapters cannot be indefinitely composed.
815 // In particular, if a c2i adapter were to somehow call an i2c adapter,
816 // both caller and callee would be compiled methods, and neither would
817 // clean up the stack pointer changes performed by the two adapters.
818 // If this happens, control eventually transfers back to the compiled
819 // caller, but with an uncorrected stack, causing delayed havoc.
820
821 // Cut-out for having no stack args.
822 int comp_words_on_stack = 0;
823 if (comp_args_on_stack) {
824 comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
825 __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
826 __ andr(sp, rscratch1, -16);
827 }
828
829 // Will jump to the compiled code just as if compiled code was doing it.
830 // Pre-load the register-jump target early, to schedule it better.
831 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
832
833 #if INCLUDE_JVMCI
834 if (EnableJVMCI) {
835 // check if this call should be routed towards a specific entry point
836 __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
837 Label no_alternative_target;
838 __ cbz(rscratch2, no_alternative_target);
839 __ mov(rscratch1, rscratch2);
840 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
841 __ bind(no_alternative_target);
842 }
843 #endif // INCLUDE_JVMCI
844
845 int total_args_passed = sig->length();
846
847 // Now generate the shuffle code.
848 for (int i = 0; i < total_args_passed; i++) {
849 BasicType bt = sig->at(i)._bt;
850 if (bt == T_VOID) {
851 assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
852 continue;
853 }
854
855 // Pick up 0, 1 or 2 words from SP+offset.
856 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
857
858 // Load in argument order going down.
859 int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
860 // Point to interpreter value (vs. tag)
861 int next_off = ld_off - Interpreter::stackElementSize;
862 //
863 //
864 //
865 VMReg r_1 = regs[i].first();
866 VMReg r_2 = regs[i].second();
867 if (!r_1->is_valid()) {
868 assert(!r_2->is_valid(), "");
869 continue;
870 }
871 if (r_1->is_stack()) {
872 // Convert stack slot to an SP offset (+ wordSize to account for return address)
873 int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
874 if (!r_2->is_valid()) {
875 // sign extend???
876 __ ldrsw(rscratch2, Address(esp, ld_off));
877 __ str(rscratch2, Address(sp, st_off));
878 } else {
879 //
880 // We are using two optoregs. This can be either T_OBJECT,
881 // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
882 // two slots but only uses one for the T_LONG or T_DOUBLE case,
883 // so we must adjust where to pick up the data to match the
884 // interpreter.
885 //
886 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
887 // are accessed as negative so LSW is at LOW address
888
889 // ld_off is MSW so get LSW
890 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
891 __ ldr(rscratch2, Address(esp, offset));
892 // st_off is LSW (i.e. reg.first())
893 __ str(rscratch2, Address(sp, st_off));
894 }
895 } else if (r_1->is_Register()) { // Register argument
896 Register r = r_1->as_Register();
897 if (r_2->is_valid()) {
898 //
899 // We are using two VMRegs. This can be either T_OBJECT,
900 // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
901 // two slots but only uses one for the T_LONG or T_DOUBLE case,
902 // so we must adjust where to pick up the data to match the
903 // interpreter.
904
905 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
906
907 // this can be a misaligned move
908 __ ldr(r, Address(esp, offset));
909 } else {
910 // sign extend and use a full word?
911 __ ldrw(r, Address(esp, ld_off));
912 }
913 } else {
914 if (!r_2->is_valid()) {
915 __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
916 } else {
917 __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
918 }
919 }
920 }
921
922
923 __ mov(rscratch2, rscratch1);
924 __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
925 __ mov(rscratch1, rscratch2);
926
927 // 6243940 We might end up in handle_wrong_method if
928 // the callee is deoptimized as we race through here. If that
929 // happens we don't want to take a safepoint because the
930 // caller frame will look interpreted and arguments are now
931 // "compiled" so it is much better to make this transition
932 // invisible to the stack walking code. Unfortunately if
933 // we try and find the callee by normal means a safepoint
934 // is possible. So we stash the desired callee in the thread
935 // and the VM will find it there should this case occur.
936
937 __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
938 __ br(rscratch1);
939 }
940
941 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
942 Register data = rscratch2;
943 __ ic_check(1 /* end_alignment */);
944 __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
945
946 // Method might have been compiled since the call site was patched to
947 // interpreted; if that is the case treat it as a miss so we can get
948 // the call site corrected.
949 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
950 __ cbz(rscratch1, skip_fixup);
951 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
952 }
953
954 // ---------------------------------------------------------------
955 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
956 int comp_args_on_stack,
957 const GrowableArray<SigEntry>* sig,
958 const VMRegPair* regs,
959 const GrowableArray<SigEntry>* sig_cc,
960 const VMRegPair* regs_cc,
961 const GrowableArray<SigEntry>* sig_cc_ro,
962 const VMRegPair* regs_cc_ro,
963 address entry_address[AdapterBlob::ENTRY_COUNT],
964 AdapterBlob*& new_adapter,
965 bool allocate_code_blob) {
966
967 entry_address[AdapterBlob::I2C] = __ pc();
968 gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
969
970 // -------------------------------------------------------------------------
971 // Generate a C2I adapter. On entry we know rmethod holds the Method* during calls
972 // to the interpreter. The args start out packed in the compiled layout. They
973 // need to be unpacked into the interpreter layout. This will almost always
974 // require some stack space. We grow the current (compiled) stack, then repack
975 // the args. We finally end in a jump to the generic interpreter entry point.
976 // On exit from the interpreter, the interpreter will restore our SP (lest the
977 // compiled code, which relies solely on SP and not FP, get sick).
978
979 entry_address[AdapterBlob::C2I_Unverified] = __ pc();
980 entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
981 Label skip_fixup;
982
983 gen_inline_cache_check(masm, skip_fixup);
984
985 OopMapSet* oop_maps = new OopMapSet();
986 int frame_complete = CodeOffsets::frame_never_safe;
987 int frame_size_in_words = 0;
988
989 // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
990 entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
991 entry_address[AdapterBlob::C2I_Inline_RO] = __ pc();
992 if (regs_cc != regs_cc_ro) {
993 // No class init barrier needed because method is guaranteed to be non-static
994 gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, entry_address[AdapterBlob::C2I_No_Clinit_Check],
995 skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
996 skip_fixup.reset();
997 }
998
999 // Scalarized c2i adapter
1000 entry_address[AdapterBlob::C2I] = __ pc();
1001 entry_address[AdapterBlob::C2I_Inline] = __ pc();
1002 gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1003 skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1004
1005 // Non-scalarized c2i adapter
1006 if (regs != regs_cc) {
1007 entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
1008 Label inline_entry_skip_fixup;
1009 gen_inline_cache_check(masm, inline_entry_skip_fixup);
1010
1011 entry_address[AdapterBlob::C2I_Inline] = __ pc();
1012 gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1013 inline_entry_skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1014 }
1015
1016 // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1017 // the GC knows about the location of oop argument locations passed to the c2i adapter.
1018 if (allocate_code_blob) {
1019 bool caller_must_gc_arguments = (regs != regs_cc);
1020 int entry_offset[AdapterHandlerEntry::ENTRIES_COUNT];
1021 assert(AdapterHandlerEntry::ENTRIES_COUNT == 7, "sanity");
1022 AdapterHandlerLibrary::address_to_offset(entry_address, entry_offset);
1023 new_adapter = AdapterBlob::create(masm->code(), entry_offset, frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1024 }
1025 }
1026
1027 static int c_calling_convention_priv(const BasicType *sig_bt,
1028 VMRegPair *regs,
1029 int total_args_passed) {
1030
1031 // We return the number of VMRegImpl stack slots we need to reserve for all
1032 // the arguments NOT counting out_preserve_stack_slots.
1033
1034 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1035 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
1036 };
1037 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1038 c_farg0, c_farg1, c_farg2, c_farg3,
1039 c_farg4, c_farg5, c_farg6, c_farg7
1040 };
1041
1042 uint int_args = 0;
1043 uint fp_args = 0;
1044 uint stk_args = 0; // inc by 2 each time
1045
1046 for (int i = 0; i < total_args_passed; i++) {
1047 switch (sig_bt[i]) {
1048 case T_BOOLEAN:
1049 case T_CHAR:
1050 case T_BYTE:
1051 case T_SHORT:
1052 case T_INT:
1053 if (int_args < Argument::n_int_register_parameters_c) {
1054 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1055 } else {
1056 #ifdef __APPLE__
1057 // Less-than-word types are stored one after another.
1058 // The code is unable to handle this, so bail out.
1059 return -1;
1060 #endif
1061 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1062 stk_args += 2;
1063 }
1064 break;
1065 case T_LONG:
1066 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1067 // fall through
1068 case T_OBJECT:
1069 case T_ARRAY:
1070 case T_ADDRESS:
1071 case T_METADATA:
1072 if (int_args < Argument::n_int_register_parameters_c) {
1073 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1074 } else {
1075 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1076 stk_args += 2;
1077 }
1078 break;
1079 case T_FLOAT:
1080 if (fp_args < Argument::n_float_register_parameters_c) {
1081 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1082 } else {
1083 #ifdef __APPLE__
1084 // Less-than-word types are stored one after another.
1085 // The code is unable to handle this, so bail out.
1086 return -1;
1087 #endif
1088 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1089 stk_args += 2;
1090 }
1091 break;
1092 case T_DOUBLE:
1093 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1094 if (fp_args < Argument::n_float_register_parameters_c) {
1095 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1096 } else {
1097 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1098 stk_args += 2;
1099 }
1100 break;
1101 case T_VOID: // Halves of longs and doubles
1102 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1103 regs[i].set_bad();
1104 break;
1105 default:
1106 ShouldNotReachHere();
1107 break;
1108 }
1109 }
1110
1111 return stk_args;
1112 }
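// Illustrative: with 8 integer argument registers (c_rarg0..c_rarg7), a 9th int
// argument lands on the stack and still consumes two 4-byte slots (stk_args += 2)
// on Linux, while on macOS the helper returns -1 because sub-word stack arguments
// are packed one after another and not handled here.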
1113
1114 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1115 uint num_bits,
1116 uint total_args_passed) {
1117 // More than 8 argument inputs are not supported now.
1118 assert(total_args_passed <= Argument::n_float_register_parameters_c, "unsupported");
1119 assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");
1120
1121 static const FloatRegister VEC_ArgReg[Argument::n_float_register_parameters_c] = {
1122 v0, v1, v2, v3, v4, v5, v6, v7
1123 };
1124
1125 // On SVE, we use the same vector registers as the 128-bit vector registers on NEON.
1126 int next_reg_val = num_bits == 64 ? 1 : 3;
1127 for (uint i = 0; i < total_args_passed; i++) {
1128 VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
1129 regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
1130 }
1131 return 0;
1132 }
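// Illustrative: for num_bits == 128, next_reg_val is 3, so regs[0] becomes the
// pair {v0->as_VMReg()->next(3), v0->as_VMReg()}, i.e. the argument spans the
// four 32-bit slots of v0; with num_bits == 64 only two slots per register are
// paired.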
1133
1134 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1135 VMRegPair *regs,
1136 int total_args_passed)
1137 {
1138 int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
1139 guarantee(result >= 0, "Unsupported arguments configuration");
1140 return result;
1141 }
1142
1143
1144 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1145 // We always ignore the frame_slots arg and just use the space just below the frame pointer,
1146 // which by this time is free to use
1147 switch (ret_type) {
1148 case T_FLOAT:
1149 __ strs(v0, Address(rfp, -wordSize));
1150 break;
1151 case T_DOUBLE:
1152 __ strd(v0, Address(rfp, -wordSize));
1153 break;
1154 case T_VOID: break;
1155 default: {
1156 __ str(r0, Address(rfp, -wordSize));
1157 }
1158 }
1159 }
1160
1161 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1162 // We always ignore the frame_slots arg and just use the space just below the frame pointer,
1163 // which by this time is free to use
1164 switch (ret_type) {
1165 case T_FLOAT:
1166 __ ldrs(v0, Address(rfp, -wordSize));
1167 break;
1168 case T_DOUBLE:
1169 __ ldrd(v0, Address(rfp, -wordSize));
1170 break;
1171 case T_VOID: break;
1172 default: {
1173 __ ldr(r0, Address(rfp, -wordSize));
1174 }
1175 }
1176 }
1177 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1178 RegSet x;
1179 for ( int i = first_arg ; i < arg_count ; i++ ) {
1180 if (args[i].first()->is_Register()) {
1181 x = x + args[i].first()->as_Register();
1182 } else if (args[i].first()->is_FloatRegister()) {
1183 __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
1184 }
1185 }
1186 __ push(x, sp);
1187 }
1188
1189 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1190 RegSet x;
1191 for ( int i = first_arg ; i < arg_count ; i++ ) {
1192 if (args[i].first()->is_Register()) {
1193 x = x + args[i].first()->as_Register();
1194 } else {
1195 ;
1196 }
1197 }
1198 __ pop(x, sp);
1199 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1200 if (args[i].first()->is_Register()) {
1201 ;
1202 } else if (args[i].first()->is_FloatRegister()) {
1203 __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
1204 }
1205 }
1206 }
1207
1208 static void verify_oop_args(MacroAssembler* masm,
1209 const methodHandle& method,
1210 const BasicType* sig_bt,
1211 const VMRegPair* regs) {
1212 Register temp_reg = r19; // not part of any compiled calling seq
1213 if (VerifyOops) {
1214 for (int i = 0; i < method->size_of_parameters(); i++) {
1215 if (sig_bt[i] == T_OBJECT ||
1216 sig_bt[i] == T_ARRAY) {
1217 VMReg r = regs[i].first();
1218 assert(r->is_valid(), "bad oop arg");
1219 if (r->is_stack()) {
1220 __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1221 __ verify_oop(temp_reg);
1222 } else {
1223 __ verify_oop(r->as_Register());
1224 }
1225 }
1226 }
1227 }
1228 }
1229
1230 // on exit, sp points to the ContinuationEntry
1231 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
1232 assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1233 assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
1234 assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1235
1236 stack_slots += (int)ContinuationEntry::size()/wordSize;
1237 __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata
1238
1239 OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
1240
1241 __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1242 __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
1243 __ mov(rscratch1, sp); // we can't use sp as the source in str
1244 __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1245
1246 return map;
1247 }
1248
1249 // on entry c_rarg1 points to the continuation
1250 // sp points to ContinuationEntry
1251 // c_rarg3 -- isVirtualThread
1252 static void fill_continuation_entry(MacroAssembler* masm) {
1253 #ifdef ASSERT
1254 __ movw(rscratch1, ContinuationEntry::cookie_value());
1255 __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
1256 #endif
1257
1258 __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
1259 __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
1260 __ str (zr, Address(sp, ContinuationEntry::chunk_offset()));
1261 __ strw(zr, Address(sp, ContinuationEntry::argsize_offset()));
1262 __ strw(zr, Address(sp, ContinuationEntry::pin_count_offset()));
1263
1264 __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1265 __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1266
1267 __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
1268 }
1269
1270 // on entry, sp points to the ContinuationEntry
1271 // on exit, rfp points to the spilled rfp in the entry frame
1272 static void continuation_enter_cleanup(MacroAssembler* masm) {
1273 #ifndef PRODUCT
1274 Label OK;
1275 __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1276 __ cmp(sp, rscratch1);
1277 __ br(Assembler::EQ, OK);
1278 __ stop("incorrect sp1");
1279 __ bind(OK);
1280 #endif
1281 __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1282 __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1283 __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
1284 __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
1285 __ add(rfp, sp, (int)ContinuationEntry::size());
1286 }
1287
1288 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1289 // On entry: c_rarg1 -- the continuation object
1290 // c_rarg2 -- isContinue
1291 // c_rarg3 -- isVirtualThread
1292 static void gen_continuation_enter(MacroAssembler* masm,
1293 const methodHandle& method,
1294 const BasicType* sig_bt,
1295 const VMRegPair* regs,
1296 int& exception_offset,
1297 OopMapSet*oop_maps,
1298 int& frame_complete,
1299 int& stack_slots,
1300 int& interpreted_entry_offset,
1301 int& compiled_entry_offset) {
1302 //verify_oop_args(masm, method, sig_bt, regs);
1303 Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
1304
1305 address start = __ pc();
1306
1307 Label call_thaw, exit;
1308
1309 // i2i entry used at interp_only_mode only
1310 interpreted_entry_offset = __ pc() - start;
1311 {
1312
1313 #ifdef ASSERT
1314 Label is_interp_only;
1315 __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
1316 __ cbnzw(rscratch1, is_interp_only);
1317 __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1318 __ bind(is_interp_only);
1319 #endif
1320
1321 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1322 __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
1323 __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
1324 __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
1325 __ push_cont_fastpath(rthread);
1326
1327 __ enter();
1328 stack_slots = 2; // will be adjusted in setup
1329 OopMap* map = continuation_enter_setup(masm, stack_slots);
1330 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe.
1331 // That's okay because at the very worst we'll miss an async sample, and we're in interp_only_mode anyway.
1332
1333 fill_continuation_entry(masm);
1334
1335 __ cbnz(c_rarg2, call_thaw);
1336
1337 const address tr_call = __ trampoline_call(resolve);
1338 if (tr_call == nullptr) {
1339 fatal("CodeCache is full at gen_continuation_enter");
1340 }
1341
1342 oop_maps->add_gc_map(__ pc() - start, map);
1343 __ post_call_nop();
1344
1345 __ b(exit);
1346
1347 address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1348 if (stub == nullptr) {
1349 fatal("CodeCache is full at gen_continuation_enter");
1350 }
1351 }
1352
1353 // compiled entry
1354 __ align(CodeEntryAlignment);
1355 compiled_entry_offset = __ pc() - start;
1356
1357 __ enter();
1358 stack_slots = 2; // will be adjusted in setup
1359 OopMap* map = continuation_enter_setup(masm, stack_slots);
1360 frame_complete = __ pc() - start;
1361
1362 fill_continuation_entry(masm);
1363
1364 __ cbnz(c_rarg2, call_thaw);
1365
1366 const address tr_call = __ trampoline_call(resolve);
1367 if (tr_call == nullptr) {
1368 fatal("CodeCache is full at gen_continuation_enter");
1369 }
1370
1371 oop_maps->add_gc_map(__ pc() - start, map);
1372 __ post_call_nop();
1373
1374 __ b(exit);
1375
1376 __ bind(call_thaw);
1377
1378 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1379 __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1380 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1381 ContinuationEntry::_return_pc_offset = __ pc() - start;
1382 __ post_call_nop();
1383
1384 __ bind(exit);
1385 ContinuationEntry::_cleanup_offset = __ pc() - start;
1386 continuation_enter_cleanup(masm);
1387 __ leave();
1388 __ ret(lr);
1389
1390 /// exception handling
1391
1392 exception_offset = __ pc() - start;
1393 {
1394 __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1395
1396 continuation_enter_cleanup(masm);
1397
1398 __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1399 __ authenticate_return_address(c_rarg1);
1400 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1401
1402 // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1403
1404 __ mov(r1, r0); // the exception handler
1405 __ mov(r0, r19); // restore return value containing the exception oop
1406 __ verify_oop(r0);
1407
1408 __ leave();
1409 __ mov(r3, lr);
1410 __ br(r1); // the exception handler
1411 }
1412
1413 address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1414 if (stub == nullptr) {
1415 fatal("CodeCache is full at gen_continuation_enter");
1416 }
1417 }
1418
1419 static void gen_continuation_yield(MacroAssembler* masm,
1420 const methodHandle& method,
1421 const BasicType* sig_bt,
1422 const VMRegPair* regs,
1423 OopMapSet* oop_maps,
1424 int& frame_complete,
1425 int& stack_slots,
1426 int& compiled_entry_offset) {
1427 enum layout {
1428 rfp_off1,
1429 rfp_off2,
1430 lr_off,
1431 lr_off2,
1432 framesize // inclusive of return address
1433 };
1434 // assert(is_even(framesize/2), "sp not 16-byte aligned");
1435 stack_slots = framesize / VMRegImpl::slots_per_word;
1436 assert(stack_slots == 2, "recheck layout");
1437
1438 address start = __ pc();
1439
1440 compiled_entry_offset = __ pc() - start;
1441 __ enter();
1442
1443 __ mov(c_rarg1, sp);
1444
1445 frame_complete = __ pc() - start;
1446 address the_pc = __ pc();
1447
1448 __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup
1449
1450 __ mov(c_rarg0, rthread);
1451 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
1452 __ call_VM_leaf(Continuation::freeze_entry(), 2);
1453 __ reset_last_Java_frame(true);
1454
1455 Label pinned;
1456
1457 __ cbnz(r0, pinned);
1458
1459 // We've succeeded, set sp to the ContinuationEntry
1460 __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1461 __ mov(sp, rscratch1);
1462 continuation_enter_cleanup(masm);
1463
1464 __ bind(pinned); // pinned -- return to caller
1465
1466 // handle pending exception thrown by freeze
1467 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1468 Label ok;
1469 __ cbz(rscratch1, ok);
1470 __ leave();
1471 __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1472 __ br(rscratch1);
1473 __ bind(ok);
1474
1475 __ leave();
1476 __ ret(lr);
1477
1478 OopMap* map = new OopMap(framesize, 1);
1479 oop_maps->add_gc_map(the_pc - start, map);
1480 }
1481
1482 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1483 ::continuation_enter_cleanup(masm);
1484 }
1485
1486 static void gen_special_dispatch(MacroAssembler* masm,
1487 const methodHandle& method,
1488 const BasicType* sig_bt,
1489 const VMRegPair* regs) {
1490 verify_oop_args(masm, method, sig_bt, regs);
1491 vmIntrinsics::ID iid = method->intrinsic_id();
1492
1493 // Now write the args into the outgoing interpreter space
1494 bool has_receiver = false;
1495 Register receiver_reg = noreg;
1496 int member_arg_pos = -1;
1497 Register member_reg = noreg;
1498 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1499 if (ref_kind != 0) {
1500 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1501 member_reg = r19; // known to be free at this point
1502 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1503 } else if (iid == vmIntrinsics::_invokeBasic) {
1504 has_receiver = true;
1505 } else if (iid == vmIntrinsics::_linkToNative) {
1506 member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
1507 member_reg = r19; // known to be free at this point
1508 } else {
1509 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1510 }
1511
1512 if (member_reg != noreg) {
1513 // Load the member_arg into register, if necessary.
1514 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1515 VMReg r = regs[member_arg_pos].first();
1516 if (r->is_stack()) {
1517 __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1518 } else {
1519 // no data motion is needed
1520 member_reg = r->as_Register();
1521 }
1522 }
1523
1524 if (has_receiver) {
1525 // Make sure the receiver is loaded into a register.
1526 assert(method->size_of_parameters() > 0, "oob");
1527 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1528 VMReg r = regs[0].first();
1529 assert(r->is_valid(), "bad receiver arg");
1530 if (r->is_stack()) {
1531 // Porting note: This assumes that compiled calling conventions always
1532 // pass the receiver oop in a register. If this is not true on some
1533 // platform, pick a temp and load the receiver from stack.
1534 fatal("receiver always in a register");
1535 receiver_reg = r2; // known to be free at this point
1536 __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1537 } else {
1538 // no data motion is needed
1539 receiver_reg = r->as_Register();
1540 }
1541 }
1542
1543 // Figure out which address we are really jumping to:
1544 MethodHandles::generate_method_handle_dispatch(masm, iid,
1545 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1546 }
1547
1548 // ---------------------------------------------------------------------------
1549 // Generate a native wrapper for a given method. The method takes arguments
1550 // in the Java compiled code convention, marshals them to the native
1551 // convention (handlizes oops, etc), transitions to native, makes the call,
1552 // returns to java state (possibly blocking), unhandlizes any result and
1553 // returns.
1554 //
1555 // Critical native functions are a shorthand for the use of
1556 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1557 // functions. The wrapper is expected to unpack the arguments before
1558 // passing them to the callee. Critical native functions leave the state _in_Java,
1559 // since they block out GC.
1560 // Some other parts of JNI setup are skipped, like the teardown of the JNI handle
1561 // block and the check for pending exceptions, since it's impossible for them
1562 // to be thrown.
1563 //
1564 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1565 const methodHandle& method,
1566 int compile_id,
1567 BasicType* in_sig_bt,
1568 VMRegPair* in_regs,
1569 BasicType ret_type) {
1570 if (method->is_continuation_native_intrinsic()) {
1571 int exception_offset = -1;
1572 OopMapSet* oop_maps = new OopMapSet();
1573 int frame_complete = -1;
1574 int stack_slots = -1;
1575 int interpreted_entry_offset = -1;
1576 int vep_offset = -1;
1577 if (method->is_continuation_enter_intrinsic()) {
1578 gen_continuation_enter(masm,
1579 method,
1580 in_sig_bt,
1581 in_regs,
1582 exception_offset,
1583 oop_maps,
1584 frame_complete,
1585 stack_slots,
1586 interpreted_entry_offset,
1587 vep_offset);
1588 } else if (method->is_continuation_yield_intrinsic()) {
1589 gen_continuation_yield(masm,
1590 method,
1591 in_sig_bt,
1592 in_regs,
1593 oop_maps,
1594 frame_complete,
1595 stack_slots,
1596 vep_offset);
1597 } else {
1598 guarantee(false, "Unknown Continuation native intrinsic");
1599 }
1600
1601 #ifdef ASSERT
1602 if (method->is_continuation_enter_intrinsic()) {
1603 assert(interpreted_entry_offset != -1, "Must be set");
1604 assert(exception_offset != -1, "Must be set");
1605 } else {
1606 assert(interpreted_entry_offset == -1, "Must be unset");
1607 assert(exception_offset == -1, "Must be unset");
1608 }
1609 assert(frame_complete != -1, "Must be set");
1610 assert(stack_slots != -1, "Must be set");
1611 assert(vep_offset != -1, "Must be set");
1612 #endif
1613
1614 __ flush();
1615 nmethod* nm = nmethod::new_native_nmethod(method,
1616 compile_id,
1617 masm->code(),
1618 vep_offset,
1619 frame_complete,
1620 stack_slots,
1621 in_ByteSize(-1),
1622 in_ByteSize(-1),
1623 oop_maps,
1624 exception_offset);
1625 if (nm == nullptr) return nm;
1626 if (method->is_continuation_enter_intrinsic()) {
1627 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1628 } else if (method->is_continuation_yield_intrinsic()) {
1629 _cont_doYield_stub = nm;
1630 } else {
1631 guarantee(false, "Unknown Continuation native intrinsic");
1632 }
1633 return nm;
1634 }
1635
1636 if (method->is_method_handle_intrinsic()) {
1637 vmIntrinsics::ID iid = method->intrinsic_id();
1638 intptr_t start = (intptr_t)__ pc();
1639 int vep_offset = ((intptr_t)__ pc()) - start;
1640
1641 // First instruction must be a nop as it may need to be patched on deoptimization
1642 __ nop();
1643 gen_special_dispatch(masm,
1644 method,
1645 in_sig_bt,
1646 in_regs);
1647 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1648 __ flush();
1649 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1650 return nmethod::new_native_nmethod(method,
1651 compile_id,
1652 masm->code(),
1653 vep_offset,
1654 frame_complete,
1655 stack_slots / VMRegImpl::slots_per_word,
1656 in_ByteSize(-1),
1657 in_ByteSize(-1),
1658 nullptr);
1659 }
1660 address native_func = method->native_function();
1661 assert(native_func != nullptr, "must have function");
1662
1663 // An OopMap for lock (and class if static)
1664 OopMapSet *oop_maps = new OopMapSet();
1665 intptr_t start = (intptr_t)__ pc();
1666
1667 // We have received a description of where all the java args are located
1668 // on entry to the wrapper. We need to convert these args to where
1669 // the jni function will expect them. To figure out where they go
1670 // we convert the java signature to a C signature by inserting
1671 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1672
1673 const int total_in_args = method->size_of_parameters();
1674 int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
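// As an illustration, a non-static native method taking (int, Object) has
// total_in_args == 3 (receiver, int, Object) and total_c_args == 4
// (JNIEnv*, receiver, int, Object); for a static method the class mirror is
// inserted after JNIEnv* instead of a receiver.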
1675
1676 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1677 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1678
1679 int argc = 0;
1680 out_sig_bt[argc++] = T_ADDRESS;
1681 if (method->is_static()) {
1682 out_sig_bt[argc++] = T_OBJECT;
1683 }
1684
1685 for (int i = 0; i < total_in_args ; i++ ) {
1686 out_sig_bt[argc++] = in_sig_bt[i];
1687 }
1688
1689 // Now figure out where the args must be stored and how much stack space
1690 // they require.
1691 int out_arg_slots;
1692 out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1693
1694 if (out_arg_slots < 0) {
1695 return nullptr;
1696 }
1697
1698 // Compute framesize for the wrapper. We need to handlize all oops in
1699 // incoming registers
1700
1701 // Calculate the total number of stack slots we will need.
1702
1703 // First count the abi requirement plus all of the outgoing args
1704 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1705
1706 // Now the space for the inbound oop handle area
1707 int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers
1708
1709 int oop_handle_offset = stack_slots;
1710 stack_slots += total_save_slots;
1711
1712 // Now any space we need for handlizing a klass if static method
1713
1714 int klass_slot_offset = 0;
1715 int klass_offset = -1;
1716 int lock_slot_offset = 0;
1717 bool is_static = false;
1718
1719 if (method->is_static()) {
1720 klass_slot_offset = stack_slots;
1721 stack_slots += VMRegImpl::slots_per_word;
1722 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1723 is_static = true;
1724 }
1725
1726 // Plus a lock if needed
1727
1728 if (method->is_synchronized()) {
1729 lock_slot_offset = stack_slots;
1730 stack_slots += VMRegImpl::slots_per_word;
1731 }
1732
1733 // Now a place (+2) to save return values or temp during shuffling
1734 // + 4 for return address (which we own) and saved rfp
1735 stack_slots += 6;
1736
1737 // OK. The space we have allocated will look like:
1738 //
1739 //
1740 // FP-> | |
1741 // |---------------------|
1742 // | 2 slots for moves |
1743 // |---------------------|
1744 // | lock box (if sync) |
1745 // |---------------------| <- lock_slot_offset
1746 // | klass (if static) |
1747 // |---------------------| <- klass_slot_offset
1748 // | oopHandle area |
1749 // |---------------------| <- oop_handle_offset (8 java arg registers)
1750 // | outbound memory |
1751 // | based arguments |
1752 // | |
1753 // |---------------------|
1754 // | |
1755 // SP-> | out_preserved_slots |
1756 //
1757 //
1758
1759
1760 // Now compute actual number of stack words we need rounding to make
1761 // stack properly aligned.
1762 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1763
1764 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
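// On aarch64, StackAlignmentInBytes is 16 and VMRegImpl::stack_slot_size is 4,
// so StackAlignmentInSlots is 4: stack_slots is rounded up to a multiple of 4
// and stack_size to a multiple of 16 bytes, keeping sp 16-byte aligned.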
1765
1766 // First thing make an ic check to see if we should even be here
1767
1768 // We are free to use all registers as temps without saving them and
1769 // restoring them except rfp. rfp is the only callee save register
1770 // as far as the interpreter and the compiler(s) are concerned.
1771
1772 const Register receiver = j_rarg0;
1773
1774 Label exception_pending;
1775
1776 assert_different_registers(receiver, rscratch1);
1777 __ verify_oop(receiver);
1778 __ ic_check(8 /* end_alignment */);
1779
1780 // Verified entry point must be aligned
1781 int vep_offset = ((intptr_t)__ pc()) - start;
1782
1783 // If we have to make this method not-entrant we'll overwrite its
1784 // first instruction with a jump. For this action to be legal we
1785 // must ensure that this first instruction is a B, BL, NOP, BKPT,
1786 // SVC, HVC, or SMC. Make it a NOP.
1787 __ nop();
1788
1789 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1790 Label L_skip_barrier;
1791 __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1792 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1793 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1794
1795 __ bind(L_skip_barrier);
1796 }
1797
1798 // Generate stack overflow check
1799 __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1800
1801 // Generate a new frame for the wrapper.
1802 __ enter();
1803 // -2 because return address is already present and so is saved rfp
1804 __ sub(sp, sp, stack_size - 2*wordSize);
1805
1806 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1807 bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1808
1809 // Frame is now completed as far as size and linkage.
1810 int frame_complete = ((intptr_t)__ pc()) - start;
1811
1812 // We use r20 as the oop handle for the receiver/klass
1813 // It is callee save so it survives the call to native
1814
1815 const Register oop_handle_reg = r20;
1816
1817 //
1818 // We immediately shuffle the arguments so that any vm call we have to
1819 // make from here on out (sync slow path, jvmti, etc.) we will have
1820 // captured the oops from our caller and have a valid oopMap for
1821 // them.
1822
1823 // -----------------
1824 // The Grand Shuffle
1825
1826 // The Java calling convention is either equal to (linux) or denser than (win64) the
1827 // C calling convention. However, because of the jni_env argument the C calling
1828 // convention always has at least one more argument (and two for static methods) than Java.
1829 // Therefore if we move the args from java -> c backwards then we will never have
1830 // a register->register conflict and we don't have to build a dependency graph
1831 // and figure out how to break any cycles.
1832 //
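// Concretely, java arg i ends up as c arg i + 1 (i + 2 for static methods),
// so the shuffle loop below walks the pairs from the last one down to the first.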
1833
1834 // Record esp-based slot for receiver on stack for non-static methods
1835 int receiver_offset = -1;
1836
1837 // This is a trick. We double the stack slots so we can claim
1838 // the oops in the caller's frame. Since we are sure to have
1839 // more args than the caller, doubling is enough to make
1840 // sure we can capture all the incoming oop args from the
1841 // caller.
1842 //
1843 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1844
1845 // Mark location of rfp (someday)
1846 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1847
1848
1849 int float_args = 0;
1850 int int_args = 0;
1851
1852 #ifdef ASSERT
1853 bool reg_destroyed[Register::number_of_registers];
1854 bool freg_destroyed[FloatRegister::number_of_registers];
1855 for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1856 reg_destroyed[r] = false;
1857 }
1858 for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1859 freg_destroyed[f] = false;
1860 }
1861
1862 #endif /* ASSERT */
1863
1864 // For JNI natives the incoming and outgoing registers are offset upwards.
1865 GrowableArray<int> arg_order(2 * total_in_args);
1866
1867 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1868 arg_order.push(i);
1869 arg_order.push(c_arg);
1870 }
1871
1872 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1873 int i = arg_order.at(ai);
1874 int c_arg = arg_order.at(ai + 1);
1875 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1876 assert(c_arg != -1 && i != -1, "wrong order");
1877 #ifdef ASSERT
1878 if (in_regs[i].first()->is_Register()) {
1879 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1880 } else if (in_regs[i].first()->is_FloatRegister()) {
1881 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1882 }
1883 if (out_regs[c_arg].first()->is_Register()) {
1884 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1885 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1886 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1887 }
1888 #endif /* ASSERT */
1889 switch (in_sig_bt[i]) {
1890 case T_ARRAY:
1891 case T_OBJECT:
1892 __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1893 ((i == 0) && (!is_static)),
1894 &receiver_offset);
1895 int_args++;
1896 break;
1897 case T_VOID:
1898 break;
1899
1900 case T_FLOAT:
1901 __ float_move(in_regs[i], out_regs[c_arg]);
1902 float_args++;
1903 break;
1904
1905 case T_DOUBLE:
1906 assert( i + 1 < total_in_args &&
1907 in_sig_bt[i + 1] == T_VOID &&
1908 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1909 __ double_move(in_regs[i], out_regs[c_arg]);
1910 float_args++;
1911 break;
1912
1913 case T_LONG :
1914 __ long_move(in_regs[i], out_regs[c_arg]);
1915 int_args++;
1916 break;
1917
1918 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1919
1920 default:
1921 __ move32_64(in_regs[i], out_regs[c_arg]);
1922 int_args++;
1923 }
1924 }
1925
1926 // point c_arg at the first arg that is already loaded in case we
1927 // need to spill before we call out
1928 int c_arg = total_c_args - total_in_args;
1929
1930 // Pre-load a static method's oop into c_rarg1.
1931 if (method->is_static()) {
1932
1933 // load oop into a register
1934 __ movoop(c_rarg1,
1935 JNIHandles::make_local(method->method_holder()->java_mirror()));
1936
1937 // Now handlize the static class mirror; it's known not-null.
1938 __ str(c_rarg1, Address(sp, klass_offset));
1939 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1940
1941 // Now get the handle
1942 __ lea(c_rarg1, Address(sp, klass_offset));
1943 // and protect the arg if we must spill
1944 c_arg--;
1945 }
1946
1947 // Change state to native (we save the return address in the thread, since it might not
1948 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1949 // points into the right code segment. It does not have to be the correct return pc.
1950 // We use the same pc/oopMap repeatedly when we call out.
1951
1952 Label native_return;
1953 if (method->is_object_wait0()) {
1954 // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1955 __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1956 } else {
1957 intptr_t the_pc = (intptr_t) __ pc();
1958 oop_maps->add_gc_map(the_pc - start, map);
1959
1960 __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
1961 }
1962
1963 Label dtrace_method_entry, dtrace_method_entry_done;
1964 if (DTraceMethodProbes) {
1965 __ b(dtrace_method_entry);
1966 __ bind(dtrace_method_entry_done);
1967 }
1968
1969 // RedefineClasses() tracing support for obsolete method entry
1970 if (log_is_enabled(Trace, redefine, class, obsolete)) {
1971 // protect the args we've loaded
1972 save_args(masm, total_c_args, c_arg, out_regs);
1973 __ mov_metadata(c_rarg1, method());
1974 __ call_VM_leaf(
1975 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1976 rthread, c_rarg1);
1977 restore_args(masm, total_c_args, c_arg, out_regs);
1978 }
1979
1980 // Lock a synchronized method
1981
1982 // Register definitions used by locking and unlocking
1983
1984 const Register swap_reg = r0;
1985 const Register obj_reg = r19; // Will contain the oop
1986 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
1987 const Register old_hdr = r13; // value of old header at unlock time
1988 const Register lock_tmp = r14; // Temporary used by lightweight_lock/unlock
1989 const Register tmp = lr;
1990
1991 Label slow_path_lock;
1992 Label lock_done;
1993
1994 if (method->is_synchronized()) {
1995 // Get the handle (the 2nd argument)
1996 __ mov(oop_handle_reg, c_rarg1);
1997
1998 // Get address of the box
1999
2000 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2001
2002 // Load the oop from the handle
2003 __ ldr(obj_reg, Address(oop_handle_reg, 0));
2004
2005 __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
2006
2007 // Slow path will re-enter here
2008 __ bind(lock_done);
2009 }
2010
2011
2012 // Finally just about ready to make the JNI call
2013
2014 // get JNIEnv* which is first argument to native
2015 __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
2016
2017 // Now set thread in native
2018 __ mov(rscratch1, _thread_in_native);
2019 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2020 __ stlrw(rscratch1, rscratch2);
2021
2022 __ rt_call(native_func);
2023
2024 // Verify or restore cpu control state after JNI call
2025 __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
2026
2027 // Unpack native results.
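// Integer results narrower than 64 bits are normalized here: booleans to 0/1,
// chars zero-extended, and bytes/shorts/ints sign-extended.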
2028 switch (ret_type) {
2029 case T_BOOLEAN: __ c2bool(r0); break;
2030 case T_CHAR : __ ubfx(r0, r0, 0, 16); break;
2031 case T_BYTE : __ sbfx(r0, r0, 0, 8); break;
2032 case T_SHORT : __ sbfx(r0, r0, 0, 16); break;
2033 case T_INT : __ sbfx(r0, r0, 0, 32); break;
2034 case T_DOUBLE :
2035 case T_FLOAT :
2036 // Result is in v0 we'll save as needed
2037 break;
2038 case T_ARRAY: // Really a handle
2039 case T_OBJECT: // Really a handle
2040 break; // can't de-handlize until after safepoint check
2041 case T_VOID: break;
2042 case T_LONG: break;
2043 default : ShouldNotReachHere();
2044 }
2045
2046 Label safepoint_in_progress, safepoint_in_progress_done;
2047
2048 // Switch thread to "native transition" state before reading the synchronization state.
2049 // This additional state is necessary because reading and testing the synchronization
2050 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2051 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2052 // VM thread changes sync state to synchronizing and suspends threads for GC.
2053 // Thread A is resumed to finish this native method, but doesn't block here since it
2054 // didn't see any synchronization in progress, and escapes.
2055 __ mov(rscratch1, _thread_in_native_trans);
2056
2057 __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
2058
2059 // Force this write out before the read below
2060 if (!UseSystemMemoryBarrier) {
2061 __ dmb(Assembler::ISH);
2062 }
2063
2064 __ verify_sve_vector_length();
2065
2066 // Check for safepoint operation in progress and/or pending suspend requests.
2067 {
2068 // No need for acquire as Java threads always disarm themselves.
2069 __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
2070 __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
2071 __ cbnzw(rscratch1, safepoint_in_progress);
2072 __ bind(safepoint_in_progress_done);
2073 }
2074
2075 // change thread state
2076 __ mov(rscratch1, _thread_in_Java);
2077 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2078 __ stlrw(rscratch1, rscratch2);
2079
2080 if (method->is_object_wait0()) {
2081 // Check preemption for Object.wait()
2082 __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
2083 __ cbz(rscratch1, native_return);
2084 __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
2085 __ br(rscratch1);
2086 __ bind(native_return);
2087
2088 intptr_t the_pc = (intptr_t) __ pc();
2089 oop_maps->add_gc_map(the_pc - start, map);
2090 }
2091
2092 Label reguard;
2093 Label reguard_done;
2094 __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
2095 __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
2096 __ br(Assembler::EQ, reguard);
2097 __ bind(reguard_done);
2098
2099 // native result if any is live
2100
2101 // Unlock
2102 Label unlock_done;
2103 Label slow_path_unlock;
2104 if (method->is_synchronized()) {
2105
2106 // Get locked oop from the handle we passed to jni
2107 __ ldr(obj_reg, Address(oop_handle_reg, 0));
2108
2109 // Must save r0 if it is live now because cmpxchg must use it
2110 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2111 save_native_result(masm, ret_type, stack_slots);
2112 }
2113
2114 __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
2115
2116 // slow path re-enters here
2117 __ bind(unlock_done);
2118 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2119 restore_native_result(masm, ret_type, stack_slots);
2120 }
2121 }
2122
2123 Label dtrace_method_exit, dtrace_method_exit_done;
2124 if (DTraceMethodProbes) {
2125 __ b(dtrace_method_exit);
2126 __ bind(dtrace_method_exit_done);
2127 }
2128
2129 __ reset_last_Java_frame(false);
2130
2131 // Unbox oop result, e.g. JNIHandles::resolve result.
2132 if (is_reference_type(ret_type)) {
2133 __ resolve_jobject(r0, r1, r2);
2134 }
2135
2136 if (CheckJNICalls) {
2137 // clear_pending_jni_exception_check
2138 __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2139 }
2140
2141 // reset handle block
2142 __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2143 __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2144
2145 __ leave();
2146
2147 #if INCLUDE_JFR
2148 // We need to do a poll test after unwind in case the sampler
2149 // managed to sample the native frame after returning to Java.
2150 Label L_return;
2151 __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
2152 address poll_test_pc = __ pc();
2153 __ relocate(relocInfo::poll_return_type);
2154 __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), L_return);
2155 assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
2156 "polling page return stub not created yet");
2157 address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
2158 __ adr(rscratch1, InternalAddress(poll_test_pc));
2159 __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
2160 __ far_jump(RuntimeAddress(stub));
2161 __ bind(L_return);
2162 #endif // INCLUDE_JFR
2163
2164 // Any exception pending?
2165 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2166 __ cbnz(rscratch1, exception_pending);
2167
2168 // We're done
2169 __ ret(lr);
2170
2171 // Unexpected paths are out of line and go here
2172
2173 // forward the exception
2174 __ bind(exception_pending);
2175
2176 // and forward the exception
2177 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2178
2179 // Slow path locking & unlocking
2180 if (method->is_synchronized()) {
2181
2182 __ block_comment("Slow path lock {");
2183 __ bind(slow_path_lock);
2184
2185 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2186 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2187
2188 // protect the args we've loaded
2189 save_args(masm, total_c_args, c_arg, out_regs);
2190
2191 __ mov(c_rarg0, obj_reg);
2192 __ mov(c_rarg1, lock_reg);
2193 __ mov(c_rarg2, rthread);
2194
2195 // Not a leaf but we have last_Java_frame setup as we want.
2196 // We don't want to unmount in case of contention since that would complicate preserving
2197 // the arguments that had already been marshalled into the native convention. So we force
2198 // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
2199 // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
2200 __ push_cont_fastpath();
2201 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2202 __ pop_cont_fastpath();
2203 restore_args(masm, total_c_args, c_arg, out_regs);
2204
2205 #ifdef ASSERT
2206 { Label L;
2207 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2208 __ cbz(rscratch1, L);
2209 __ stop("no pending exception allowed on exit from monitorenter");
2210 __ bind(L);
2211 }
2212 #endif
2213 __ b(lock_done);
2214
2215 __ block_comment("} Slow path lock");
2216
2217 __ block_comment("Slow path unlock {");
2218 __ bind(slow_path_unlock);
2219
2220 // If we haven't already saved the native result we must save it now, as the
2221 // floating-point result register (v0) is still exposed.
2222
2223 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2224 save_native_result(masm, ret_type, stack_slots);
2225 }
2226
2227 __ mov(c_rarg2, rthread);
2228 __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2229 __ mov(c_rarg0, obj_reg);
2230
2231 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2232 // NOTE that obj_reg == r19 currently
2233 __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2234 __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2235
2236 __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2237
2238 #ifdef ASSERT
2239 {
2240 Label L;
2241 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2242 __ cbz(rscratch1, L);
2243 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2244 __ bind(L);
2245 }
2246 #endif /* ASSERT */
2247
2248 __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2249
2250 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2251 restore_native_result(masm, ret_type, stack_slots);
2252 }
2253 __ b(unlock_done);
2254
2255 __ block_comment("} Slow path unlock");
2256
2257 } // synchronized
2258
2259 // SLOW PATH Reguard the stack if needed
2260
2261 __ bind(reguard);
2262 save_native_result(masm, ret_type, stack_slots);
2263 __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2264 restore_native_result(masm, ret_type, stack_slots);
2265 // and continue
2266 __ b(reguard_done);
2267
2268 // SLOW PATH safepoint
2269 {
2270 __ block_comment("safepoint {");
2271 __ bind(safepoint_in_progress);
2272
2273 // Don't use call_VM as it will see a possible pending exception and forward it
2274 // and never return here preventing us from clearing _last_native_pc down below.
2275 //
2276 save_native_result(masm, ret_type, stack_slots);
2277 __ mov(c_rarg0, rthread);
2278 #ifndef PRODUCT
2279 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2280 #endif
2281 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2282 __ blr(rscratch1);
2283
2284 // Restore any method result value
2285 restore_native_result(masm, ret_type, stack_slots);
2286
2287 __ b(safepoint_in_progress_done);
2288 __ block_comment("} safepoint");
2289 }
2290
2291 // SLOW PATH dtrace support
2292 if (DTraceMethodProbes) {
2293 {
2294 __ block_comment("dtrace entry {");
2295 __ bind(dtrace_method_entry);
2296
2297 // We have all of the arguments set up at this point. We must not touch any of the
2298 // argument registers at this point (what if we save/restore them and there are no oops?).
2299
2300 save_args(masm, total_c_args, c_arg, out_regs);
2301 __ mov_metadata(c_rarg1, method());
2302 __ call_VM_leaf(
2303 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2304 rthread, c_rarg1);
2305 restore_args(masm, total_c_args, c_arg, out_regs);
2306 __ b(dtrace_method_entry_done);
2307 __ block_comment("} dtrace entry");
2308 }
2309
2310 {
2311 __ block_comment("dtrace exit {");
2312 __ bind(dtrace_method_exit);
2313 save_native_result(masm, ret_type, stack_slots);
2314 __ mov_metadata(c_rarg1, method());
2315 __ call_VM_leaf(
2316 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2317 rthread, c_rarg1);
2318 restore_native_result(masm, ret_type, stack_slots);
2319 __ b(dtrace_method_exit_done);
2320 __ block_comment("} dtrace exit");
2321 }
2322 }
2323
2324 __ flush();
2325
2326 nmethod *nm = nmethod::new_native_nmethod(method,
2327 compile_id,
2328 masm->code(),
2329 vep_offset,
2330 frame_complete,
2331 stack_slots / VMRegImpl::slots_per_word,
2332 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2333 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2334 oop_maps);
2335
2336 return nm;
2337 }
2338
2339 // this function returns the adjustment size (in number of words) to a c2i adapter
2340 // activation for use during deoptimization
2341 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2342 assert(callee_locals >= callee_parameters,
2343 "test and remove; got more parms than locals");
2344 if (callee_locals < callee_parameters)
2345 return 0; // No adjustment for negative locals
2346 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2347 // diff is counted in stack words
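// For example, with 2 callee parameters and 5 callee locals (and
// stackElementWords == 1 on this 64-bit port), diff is 3 words and is
// rounded up to 4 below to keep the extended frame 16-byte aligned.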
2348 return align_up(diff, 2);
2349 }
2350
2351
2352 //------------------------------generate_deopt_blob----------------------------
2353 void SharedRuntime::generate_deopt_blob() {
2354 // Allocate space for the code
2355 ResourceMark rm;
2356 // Setup code generation tools
2357 int pad = 0;
2358 #if INCLUDE_JVMCI
2359 if (EnableJVMCI) {
2360 pad += 512; // Increase the buffer size when compiling for JVMCI
2361 }
2362 #endif
2363 const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2364 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id);
2365 if (blob != nullptr) {
2366 _deopt_blob = blob->as_deoptimization_blob();
2367 return;
2368 }
2369
2370 CodeBuffer buffer(name, 2048+pad, 1024);
2371 MacroAssembler* masm = new MacroAssembler(&buffer);
2372 int frame_size_in_words;
2373 OopMap* map = nullptr;
2374 OopMapSet *oop_maps = new OopMapSet();
2375 RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2376
2377 // -------------
2378 // This code enters when returning to a de-optimized nmethod. A return
2379 // address has been pushed on the stack, and return values are in
2380 // registers.
2381 // If we are doing a normal deopt then we were called from the patched
2382 // nmethod from the point we returned to the nmethod. So the return
2383 // address on the stack is wrong by NativeCall::instruction_size
2384 // We will adjust the value so it looks like we have the original return
2385 // address on the stack (like when we eagerly deoptimized).
2386 // In the case of an exception pending when deoptimizing, we enter
2387 // with a return address on the stack that points after the call we patched
2388 // into the exception handler. We have the following register state from,
2389 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2390 // r0: exception oop
2391 // r19: exception handler
2392 // r3: throwing pc
2393 // So in this case we simply jam r3 into the useless return address and
2394 // the stack looks just like we want.
2395 //
2396 // At this point we need to de-opt. We save the argument return
2397 // registers. We call the first C routine, fetch_unroll_info(). This
2398 // routine captures the return values and returns a structure which
2399 // describes the current frame size and the sizes of all replacement frames.
2400 // The current frame is compiled code and may contain many inlined
2401 // functions, each with their own JVM state. We pop the current frame, then
2402 // push all the new frames. Then we call the C routine unpack_frames() to
2403 // populate these frames. Finally unpack_frames() returns us the new target
2404 // address. Notice that callee-save registers are BLOWN here; they have
2405 // already been captured in the vframeArray at the time the return PC was
2406 // patched.
2407 address start = __ pc();
2408 Label cont;
2409
2410 // Prolog for non exception case!
2411
2412 // Save everything in sight.
2413 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2414
2415 // Normal deoptimization. Save exec mode for unpack_frames.
2416 __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2417 __ b(cont);
2418
2419 int reexecute_offset = __ pc() - start;
2420 #if INCLUDE_JVMCI && !defined(COMPILER1)
2421 if (UseJVMCICompiler) {
2422 // JVMCI does not use this kind of deoptimization
2423 __ should_not_reach_here();
2424 }
2425 #endif
2426
2427 // Reexecute case
2428 // return address is the pc that describes what bci to re-execute at
2429
2430 // No need to update map as each call to save_live_registers will produce identical oopmap
2431 (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2432
2433 __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2434 __ b(cont);
2435
2436 #if INCLUDE_JVMCI
2437 Label after_fetch_unroll_info_call;
2438 int implicit_exception_uncommon_trap_offset = 0;
2439 int uncommon_trap_offset = 0;
2440
2441 if (EnableJVMCI) {
2442 implicit_exception_uncommon_trap_offset = __ pc() - start;
2443
2444 __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2445 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2446
2447 uncommon_trap_offset = __ pc() - start;
2448
2449 // Save everything in sight.
2450 reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2451 // fetch_unroll_info needs to call last_java_frame()
2452 Label retaddr;
2453 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2454
2455 __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2456 __ movw(rscratch1, -1);
2457 __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2458
2459 __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2460 __ mov(c_rarg0, rthread);
2461 __ movw(c_rarg2, rcpool); // exec mode
2462 __ lea(rscratch1,
2463 RuntimeAddress(CAST_FROM_FN_PTR(address,
2464 Deoptimization::uncommon_trap)));
2465 __ blr(rscratch1);
2466 __ bind(retaddr);
2467 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2468
2469 __ reset_last_Java_frame(false);
2470
2471 __ b(after_fetch_unroll_info_call);
2472 } // EnableJVMCI
2473 #endif // INCLUDE_JVMCI
2474
2475 int exception_offset = __ pc() - start;
2476
2477 // Prolog for exception case
2478
2479 // all registers are dead at this entry point, except for r0, and
2480 // r3 which contain the exception oop and exception pc
2481 // respectively. Set them in TLS and fall thru to the
2482 // unpack_with_exception_in_tls entry point.
2483
2484 __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2485 __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2486
2487 int exception_in_tls_offset = __ pc() - start;
2488
2489 // new implementation because exception oop is now passed in JavaThread
2490
2491 // Prolog for exception case
2492 // All registers must be preserved because they might be used by LinearScan
2493 // Exception oop and throwing PC are passed in JavaThread
2494 // tos: stack at point of call to method that threw the exception (i.e. only
2495 // args are on the stack, no return address)
2496
2497 // The return address pushed by save_live_registers will be patched
2498 // later with the throwing pc. The correct value is not available
2499 // now because loading it from memory would destroy registers.
2500
2501 // NB: The SP at this point must be the SP of the method that is
2502 // being deoptimized. Deoptimization assumes that the frame created
2503 // here by save_live_registers is immediately below the method's SP.
2504 // This is a somewhat fragile mechanism.
2505
2506 // Save everything in sight.
2507 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2508
2509 // Now it is safe to overwrite any register
2510
2511 // Deopt during an exception. Save exec mode for unpack_frames.
2512 __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2513
2514 // load throwing pc from JavaThread and patch it as the return address
2515 // of the current frame. Then clear the field in JavaThread
2516 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2517 __ protect_return_address(r3);
2518 __ str(r3, Address(rfp, wordSize));
2519 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2520
2521 #ifdef ASSERT
2522 // verify that there is really an exception oop in JavaThread
2523 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2524 __ verify_oop(r0);
2525
2526 // verify that there is no pending exception
2527 Label no_pending_exception;
2528 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2529 __ cbz(rscratch1, no_pending_exception);
2530 __ stop("must not have pending exception here");
2531 __ bind(no_pending_exception);
2532 #endif
2533
2534 __ bind(cont);
2535
2536 // Call C code. Need thread and this frame, but NOT official VM entry
2537 // crud. We cannot block on this call, no GC can happen.
2538 //
2539 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2540
2541 // fetch_unroll_info needs to call last_java_frame().
2542
2543 Label retaddr;
2544 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2545 #ifdef ASSERT
2546 { Label L;
2547 __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2548 __ cbz(rscratch1, L);
2549 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2550 __ bind(L);
2551 }
2552 #endif // ASSERT
2553 __ mov(c_rarg0, rthread);
2554 __ mov(c_rarg1, rcpool);
2555 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2556 __ blr(rscratch1);
2557 __ bind(retaddr);
2558
2559 // Need to have an oopmap that tells fetch_unroll_info where to
2560 // find any register it might need.
2561 oop_maps->add_gc_map(__ pc() - start, map);
2562
2563 __ reset_last_Java_frame(false);
2564
2565 #if INCLUDE_JVMCI
2566 if (EnableJVMCI) {
2567 __ bind(after_fetch_unroll_info_call);
2568 }
2569 #endif
2570
2571 // Load UnrollBlock* into r5
2572 __ mov(r5, r0);
2573
2574 __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2575 Label noException;
2576 __ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
2577 __ br(Assembler::NE, noException);
2578 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2579 // QQQ this is useless it was null above
2580 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2581 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2582 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2583
2584 __ verify_oop(r0);
2585
2586 // Overwrite the result registers with the exception results.
2587 __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2588 // I think this is useless
2589 // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2590
2591 __ bind(noException);
2592
2593 // Only register save data is on the stack.
2594 // Now restore the result registers. Everything else is either dead
2595 // or captured in the vframeArray.
2596
2597 // Restore fp result register
2598 __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2599 // Restore integer result register
2600 __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2601
2602 // Pop all of the register save area off the stack
2603 __ add(sp, sp, frame_size_in_words * wordSize);
2604
2605 // All of the register save area has been popped off the stack. Only the
2606 // return address remains.
2607
2608 // Pop all the frames we must move/replace.
2609 //
2610 // Frame picture (youngest to oldest)
2611 // 1: self-frame (no frame link)
2612 // 2: deopting frame (no frame link)
2613 // 3: caller of deopting frame (could be compiled/interpreted).
2614 //
2615 // Note: by leaving the return address of self-frame on the stack
2616 // and using the size of frame 2 to adjust the stack
2617 // when we are done the return to frame 3 will still be on the stack.
2618
2619 // Pop deoptimized frame
2620 __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2621 __ sub(r2, r2, 2 * wordSize);
2622 __ add(sp, sp, r2);
2623 __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2624
2625 #ifdef ASSERT
2626 // Compilers generate code that bang the stack by as much as the
2627 // interpreter would need. So this stack banging should never
2628 // trigger a fault. Verify that it does not on non-product builds.
2629 __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2630 __ bang_stack_size(r19, r2);
2631 #endif
2632 // Load address of array of frame pcs into r2
2633 __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2634
2635 // Trash the old pc
2636 // __ addptr(sp, wordSize); FIXME ????
2637
2638 // Load address of array of frame sizes into r4
2639 __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2640
2641 // Load counter into r3
2642 __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2643
2644 // Now adjust the caller's stack to make up for the extra locals
2645 // but record the original sp so that we can save it in the skeletal interpreter
2646 // frame and the stack walking of interpreter_sender will get the unextended sp
2647 // value and not the "real" sp value.
2648
2649 const Register sender_sp = r6;
2650
2651 __ mov(sender_sp, sp);
2652 __ ldrw(r19, Address(r5,
2653 Deoptimization::UnrollBlock::
2654 caller_adjustment_offset()));
2655 __ sub(sp, sp, r19);
2656
2657 // Push interpreter frames in a loop
2658 __ mov(rscratch1, (uint64_t)0xDEADDEAD); // Make a recognizable pattern
2659 __ mov(rscratch2, rscratch1);
2660 Label loop;
2661 __ bind(loop);
2662 __ ldr(r19, Address(__ post(r4, wordSize))); // Load frame size
2663 __ sub(r19, r19, 2*wordSize); // We'll push pc and fp by hand
2664 __ ldr(lr, Address(__ post(r2, wordSize))); // Load pc
2665 __ enter(); // Save old & set new fp
2666 __ sub(sp, sp, r19); // Prolog
2667 // This value is corrected by layout_activation_impl
2668 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2669 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2670 __ mov(sender_sp, sp); // Pass sender_sp to next frame
2671 __ sub(r3, r3, 1); // Decrement counter
2672 __ cbnz(r3, loop);
2673
2674 // Re-push self-frame
2675 __ ldr(lr, Address(r2));
2676 __ enter();
2677
2678 // Allocate a full sized register save area. We subtract 2 because
2679 // enter() just pushed 2 words
2680 __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2681
2682 // Restore frame locals after moving the frame
2683 __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2684 __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2685
2686 // Call C code. Need thread but NOT official VM entry
2687 // crud. We cannot block on this call, no GC can happen. Call should
2688 // restore return values to their stack-slots with the new SP.
2689 //
2690 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2691
2692 // Use rfp because the frames look interpreted now
2693 // Don't need the precise return PC here, just precise enough to point into this code blob.
2694 address the_pc = __ pc();
2695 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2696
2697 __ mov(c_rarg0, rthread);
2698 __ movw(c_rarg1, rcpool); // second arg: exec_mode
2699 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2700 __ blr(rscratch1);
2701
2702 // Set an oopmap for the call site
2703 // Use the same PC we used for the last java frame
2704 oop_maps->add_gc_map(the_pc - start,
2705 new OopMap( frame_size_in_words, 0 ));
2706
2707 // Clear fp AND pc
2708 __ reset_last_Java_frame(true);
2709
2710 // Collect return values
2711 __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2712 __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2713 // I think this is useless (throwing pc?)
2714 // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2715
2716 // Pop self-frame.
2717 __ leave(); // Epilog
2718
2719 // Jump to interpreter
2720 __ ret(lr);
2721
2722 // Make sure all code is generated
2723 masm->flush();
2724
2725 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2726 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2727 #if INCLUDE_JVMCI
2728 if (EnableJVMCI) {
2729 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2730 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2731 }
2732 #endif
2733
2734 AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id);
2735 }
2736
2737 // Number of stack slots between incoming argument block and the start of
2738 // a new frame. The PROLOG must add this many slots to the stack. The
2739 // EPILOG must remove this many slots. aarch64 needs two words (four slots) for
2740 // the return address and fp.
2741 // TODO think this is correct but check
2742 uint SharedRuntime::in_preserve_stack_slots() {
2743 return 4;
2744 }
2745
2746 uint SharedRuntime::out_preserve_stack_slots() {
2747 return 0;
2748 }
2749
2750
2751 VMReg SharedRuntime::thread_register() {
2752 return rthread->as_VMReg();
2753 }
2754
2755 //------------------------------generate_handler_blob------
2756 //
2757 // Generate a special Compile2Runtime blob that saves all registers,
2758 // and setup oopmap.
2759 //
2760 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
2761 assert(is_polling_page_id(id), "expected a polling page stub id");
2762
2763 // Allocate space for the code. Setup code generation tools.
2764 const char* name = SharedRuntime::stub_name(id);
2765 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2766 if (blob != nullptr) {
2767 return blob->as_safepoint_blob();
2768 }
2769
2770 ResourceMark rm;
2771 OopMapSet *oop_maps = new OopMapSet();
2772 OopMap* map;
2773 CodeBuffer buffer(name, 2048, 1024);
2774 MacroAssembler* masm = new MacroAssembler(&buffer);
2775
2776 address start = __ pc();
2777 address call_pc = nullptr;
2778 int frame_size_in_words;
2779 bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
2780 RegisterSaver reg_save(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */);
2781
2782 // When the signal occurred, the LR was either signed and stored on the stack (in which
2783 // case it will be restored from the stack before being used) or unsigned and not stored
2784 // on the stack. Stripping ensures we get the right value.
2785 __ strip_return_address();
2786
2787 // Save Integer and Float registers.
2788 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2789
2790 // The following is basically a call_VM. However, we need the precise
2791 // address of the call in order to generate an oopmap. Hence, we do all the
2792 // work ourselves.
2793
2794 Label retaddr;
2795 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2796
2797 // The return address must always be correct so that frame constructor never
2798 // sees an invalid pc.
2799
2800 if (!cause_return) {
2801 // overwrite the return address pushed by save_live_registers
2802 // Additionally, r20 is a callee-saved register so we can look at
2803 // it later to determine if someone changed the return address for
2804 // us!
2805 __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2806 __ protect_return_address(r20);
2807 __ str(r20, Address(rfp, wordSize));
2808 }
2809
2810 // Do the call
2811 __ mov(c_rarg0, rthread);
2812 __ lea(rscratch1, RuntimeAddress(call_ptr));
2813 __ blr(rscratch1);
2814 __ bind(retaddr);
2815
2816 // Set an oopmap for the call site. This oopmap will map all
2817 // oop-registers and debug-info registers as callee-saved. This
2818 // will allow deoptimization at this safepoint to find all possible
2819 // debug-info recordings, as well as let GC find all oops.
2820
2821 oop_maps->add_gc_map( __ pc() - start, map);
2822
2823 Label noException;
2824
2825 __ reset_last_Java_frame(false);
2826
2827 __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2828
2829 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2830 __ cbz(rscratch1, noException);
2831
2832 // Exception pending
2833
2834 reg_save.restore_live_registers(masm);
2835
2836 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2837
2838 // No exception case
2839 __ bind(noException);
2840
2841 Label no_adjust, bail;
2842 if (!cause_return) {
2843 // If our stashed return pc was modified by the runtime we avoid touching it
2844 __ ldr(rscratch1, Address(rfp, wordSize));
2845 __ cmp(r20, rscratch1);
2846 __ br(Assembler::NE, no_adjust);
2847 __ authenticate_return_address(r20);
2848
2849 #ifdef ASSERT
2850 // Verify the correct encoding of the poll we're about to skip.
2851 // See NativeInstruction::is_ldrw_to_zr()
2852 __ ldrw(rscratch1, Address(r20));
2853 __ ubfx(rscratch2, rscratch1, 22, 10);
2854 __ cmpw(rscratch2, 0b1011100101);
2855 __ br(Assembler::NE, bail);
2856 __ ubfx(rscratch2, rscratch1, 0, 5);
2857 __ cmpw(rscratch2, 0b11111);
2858 __ br(Assembler::NE, bail);
2859 #endif
2860 // Adjust return pc forward to step over the safepoint poll instruction
2861 __ add(r20, r20, NativeInstruction::instruction_size);
2862 __ protect_return_address(r20);
2863 __ str(r20, Address(rfp, wordSize));
2864 }
2865
2866 __ bind(no_adjust);
2867 // Normal exit, restore registers and exit.
2868 reg_save.restore_live_registers(masm);
2869
2870 __ ret(lr);
2871
2872 #ifdef ASSERT
2873 __ bind(bail);
2874 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2875 #endif
2876
2877 // Make sure all code is generated
2878 masm->flush();
2879
2880 // Fill-out other meta info
2881 SafepointBlob* sp_blob = SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2882
2883 AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2884 return sp_blob;
2885 }
2886
2887 //
2888 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2889 //
2890 // Generate a stub that calls into vm to find out the proper destination
2891 // of a java call. All the argument registers are live at this point
2892 // but since this is generic code we don't know what they are and the caller
2893 // must do any gc of the args.
2894 //
2895 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
2896 assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2897 assert(is_resolve_id(id), "expected a resolve stub id");
2898
2899 const char* name = SharedRuntime::stub_name(id);
2900 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2901 if (blob != nullptr) {
2902 return blob->as_runtime_stub();
2903 }
2904
2905 // allocate space for the code
2906 ResourceMark rm;
2907 CodeBuffer buffer(name, 1000, 512);
2908 MacroAssembler* masm = new MacroAssembler(&buffer);
2909
2910 int frame_size_in_words;
2911 RegisterSaver reg_save(false /* save_vectors */);
2912
2913 OopMapSet *oop_maps = new OopMapSet();
2914 OopMap* map = nullptr;
2915
2916 int start = __ offset();
2917
2918 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2919
2920 int frame_complete = __ offset();
2921
2922 {
2923 Label retaddr;
2924 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2925
2926 __ mov(c_rarg0, rthread);
2927 __ lea(rscratch1, RuntimeAddress(destination));
2928
2929 __ blr(rscratch1);
2930 __ bind(retaddr);
2931 }
2932
2933 // Set an oopmap for the call site.
2934 // We need this not only for callee-saved registers, but also for volatile
2935 // registers that the compiler might be keeping live across a safepoint.
2936
2937 oop_maps->add_gc_map( __ offset() - start, map);
2938
2939 // r0 contains the address we are going to jump to assuming no exception got installed
2940
2941 // clear last_Java_sp
2942 __ reset_last_Java_frame(false);
2943 // check for pending exceptions
2944 Label pending;
2945 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2946 __ cbnz(rscratch1, pending);
2947
2948 // get the returned Method*
2949 __ get_vm_result_metadata(rmethod, rthread);
2950 __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
2951
2952 // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
2953 __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2954 reg_save.restore_live_registers(masm);
2955
2956 // We are back to the original state on entry and ready to go.
2957
2958 __ br(rscratch1);
2959
2960 // Pending exception after the safepoint
2961
2962 __ bind(pending);
2963
2964 reg_save.restore_live_registers(masm);
2965
2966 // exception pending => remove activation and forward to exception handler
2967
2968 __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
2969
2970 __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2971 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2972
2973 // -------------
2974 // make sure all code is generated
2975 masm->flush();
2976
2977 // return the blob
2978 // frame_size_words or bytes??
2979 RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2980
2981 AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2982 return rs_blob;
2983 }
2984
2985 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2986 BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
2987 if (buf == nullptr) {
2988 return nullptr;
2989 }
2990 CodeBuffer buffer(buf);
2991 short buffer_locs[20];
2992 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2993 sizeof(buffer_locs)/sizeof(relocInfo));
2994
2995 MacroAssembler _masm(&buffer);
2996 MacroAssembler* masm = &_masm;
2997
2998 const Array<SigEntry>* sig_vk = vk->extended_sig();
2999 const Array<VMRegPair>* regs = vk->return_regs();
3000
3001 int pack_fields_jobject_off = __ offset();
3002 // Resolve pre-allocated buffer from JNI handle.
3003 // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3004 Register Rresult = r14; // See StubGenerator::generate_call_stub().
3005 __ ldr(r0, Address(Rresult));
3006 __ resolve_jobject(r0 /* value */,
3007 rthread /* thread */,
3008 r12 /* tmp */);
3009 __ str(r0, Address(Rresult));
3010
3011 int pack_fields_off = __ offset();
3012
  int j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    VMRegPair pair = regs->at(j);
    VMReg r_1 = pair.first();
    VMReg r_2 = pair.second();
    Address to(r0, off);
    if (bt == T_FLOAT) {
      __ strs(r_1->as_FloatRegister(), to);
    } else if (bt == T_DOUBLE) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      Register val = r_1->as_Register();
      assert_different_registers(to.base(), val, r15, r16, r17);
      if (is_reference_type(bt)) {
        __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
      }
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  if (vk->has_nullable_atomic_layout()) {
    // Zero the null marker (setting it to 1 would be better but would require an additional register)
    __ strb(zr, Address(r0, vk->null_marker_offset()));
  }
  __ ret(lr);

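  // Unpack entry point: r0 holds either null or a heap-buffered value. For
  // null we only zero the oop-carrying return registers so the GC never sees
  // stale references; otherwise the fields are loaded from the buffer back
  // into the scalarized return registers, mirroring the pack loop above.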
  int unpack_fields_off = __ offset();

  Label skip;
  Label not_null;
  __ cbnz(r0, not_null);

  // Return value is null. Zero oop registers to make the GC happy.
  j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      VMReg r_1 = pair.first();
      __ mov(r_1->as_Register(), zr);
    }
    j++;
  }
  __ b(skip);
  __ bind(not_null);

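  // Non-null: load each field from the buffered value into its return register.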
  j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    VMReg r_1 = pair.first();
    VMReg r_2 = pair.second();
    Address from(r0, off);
    if (bt == T_FLOAT) {
      __ ldrs(r_1->as_FloatRegister(), from);
    } else if (bt == T_DOUBLE) {
      __ ldrd(r_1->as_FloatRegister(), from);
    } else if (bt == T_OBJECT || bt == T_ARRAY) {
      assert_different_registers(r0, r_1->as_Register());
      __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
    } else {
      assert(is_java_primitive(bt), "unexpected basic type");
      assert_different_registers(r0, r_1->as_Register());
      size_t size_in_bytes = type2aelembytes(bt);
      __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

  __ bind(skip);

  __ ret(lr);

  __ flush();

  return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
}

// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller-saved registers are assumed to be volatile by the compiler.

RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
  assert(is_throw_id(id), "expected a throw stub id");

  const char* name = SharedRuntime::stub_name(id);

  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
  enum layout {
    rfp_off = 0,
    rfp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };
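  // Each enum value is one 32-bit VMRegImpl stack slot; framesize (4 slots,
  // i.e. 16 bytes) covers exactly the rfp/lr pair pushed by enter() below.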

  int insts_size = 512;
  int locs_size = 64;

  const char* timer_msg = "SharedRuntime generate_throw_exception";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
  if (blob != nullptr) {
    return blob->as_runtime_stub();
  }

  ResourceMark rm;
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM

  __ enter(); // Save FP and LR before call

  assert(is_even(framesize/2), "sp not 16-byte aligned");

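  // framesize is in 32-bit slots; enter() has already pushed rfp and lr
  // (4 slots), so extend the frame by the remaining (framesize - 4) slots.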
  // lr and fp are already in place
  __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog

  int frame_complete = __ pc() - start;

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  __ mov(c_rarg0, rthread);
  BLOCK_COMMENT("call runtime_entry");
  __ lea(rscratch1, RuntimeAddress(runtime_entry));
  __ blr(rscratch1);

  // Generate oop map
  OopMap* map = new OopMap(framesize, 0);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(true);

  // Reinitialize the ptrue predicate register, in case the external runtime
  // call clobbers ptrue reg, as we may return to SVE compiled code.
  __ reinitialize_ptrue();

  __ leave();

  // check for pending exceptions
#ifdef ASSERT
  Label L;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
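  // framesize counts 32-bit slots; shifting by (LogBytesPerWord - LogBytesPerInt)
  // (i.e. by 1 here) converts slots to 64-bit words.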
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name,
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, StubInfo::blob(id));

  return stub;
}

#if INCLUDE_JFR

static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  __ mov(c_rarg0, thread);
}

// The handle is dereferenced through a load barrier.
static void jfr_epilogue(MacroAssembler* masm) {
  __ reset_last_Java_frame(true);
}

// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;
  const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
  jfr_epilogue(masm);
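  // r0 holds the jobject handle returned by write_checkpoint; resolve it
  // (through a load barrier) to obtain the event writer oop.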
  __ resolve_global_jobject(r0, rscratch1, rscratch2);
  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

// For c2: call to return a leased buffer.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;

  const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
  jfr_epilogue(masm);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

#endif // INCLUDE_JFR