1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "code/debugInfoRec.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/vtableStubs.hpp"
30 #include "frame_ppc.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/interp_masm.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "prims/methodHandles.hpp"
38 #include "runtime/continuation.hpp"
39 #include "runtime/continuationEntry.inline.hpp"
40 #include "runtime/jniHandles.hpp"
41 #include "runtime/os.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/signature.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "runtime/timerTrace.hpp"
47 #include "runtime/vframeArray.hpp"
48 #include "utilities/align.hpp"
49 #include "utilities/macros.hpp"
50 #include "vmreg_ppc.inline.hpp"
51 #ifdef COMPILER1
52 #include "c1/c1_Runtime1.hpp"
53 #endif
54 #ifdef COMPILER2
55 #include "opto/ad.hpp"
56 #include "opto/runtime.hpp"
57 #endif
58
59 #include <alloca.h>
60
61 #define __ masm->
62
63 #ifdef PRODUCT
64 #define BLOCK_COMMENT(str) // nothing
65 #else
66 #define BLOCK_COMMENT(str) __ block_comment(str)
67 #endif
68
69 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
70
71
72 class RegisterSaver {
73 // Used for saving volatile registers.
74 public:
75
76 // Support different return pc locations.
77 enum ReturnPCLocation {
78 return_pc_is_lr,
79 return_pc_is_pre_saved,
80 return_pc_is_thread_saved_exception_pc
81 };
82
83 static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
84 int* out_frame_size_in_bytes,
85 bool generate_oop_map,
86 ReturnPCLocation return_pc_location,
87 bool save_vectors = false);
88 static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
89 int frame_size_in_bytes,
90 bool restore_ctr,
91 bool save_vectors = false);
92
93 static void push_frame_and_save_argument_registers(MacroAssembler* masm,
94 Register r_temp,
95 int frame_size,
96 int total_args,
97 const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
98 static void restore_argument_registers_and_pop_frame(MacroAssembler*masm,
99 int frame_size,
100 int total_args,
101 const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
102
103 // During deoptimization only the result registers need to be restored
104 // all the other values have already been extracted.
105 static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors);
106
107 // Constants and data structures:
108
109 typedef enum {
110 int_reg,
111 float_reg,
112 special_reg,
113 vec_reg
114 } RegisterType;
115
116 typedef enum {
117 reg_size = 8,
118 half_reg_size = reg_size / 2,
119 vec_reg_size = 16
120 } RegisterConstants;
121
122 typedef struct {
123 RegisterType reg_type;
124 int reg_num;
125 VMReg vmreg;
126 } LiveRegType;
127 };
128
129
130 #define RegisterSaver_LiveIntReg(regname) \
131 { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() }
132
133 #define RegisterSaver_LiveFloatReg(regname) \
134 { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() }
135
136 #define RegisterSaver_LiveSpecialReg(regname) \
137 { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }
138
139 #define RegisterSaver_LiveVecReg(regname) \
140 { RegisterSaver::vec_reg, regname->encoding(), regname->as_VMReg() }
141
142 static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
143 // Live registers which get spilled to the stack. Register
144 // positions in this array correspond directly to the stack layout.
145
146 //
147 // live special registers:
148 //
149 RegisterSaver_LiveSpecialReg(SR_CTR),
150 //
151 // live float registers:
152 //
153 RegisterSaver_LiveFloatReg( F0 ),
154 RegisterSaver_LiveFloatReg( F1 ),
155 RegisterSaver_LiveFloatReg( F2 ),
156 RegisterSaver_LiveFloatReg( F3 ),
157 RegisterSaver_LiveFloatReg( F4 ),
158 RegisterSaver_LiveFloatReg( F5 ),
159 RegisterSaver_LiveFloatReg( F6 ),
160 RegisterSaver_LiveFloatReg( F7 ),
161 RegisterSaver_LiveFloatReg( F8 ),
162 RegisterSaver_LiveFloatReg( F9 ),
163 RegisterSaver_LiveFloatReg( F10 ),
164 RegisterSaver_LiveFloatReg( F11 ),
165 RegisterSaver_LiveFloatReg( F12 ),
166 RegisterSaver_LiveFloatReg( F13 ),
167 RegisterSaver_LiveFloatReg( F14 ),
168 RegisterSaver_LiveFloatReg( F15 ),
169 RegisterSaver_LiveFloatReg( F16 ),
170 RegisterSaver_LiveFloatReg( F17 ),
171 RegisterSaver_LiveFloatReg( F18 ),
172 RegisterSaver_LiveFloatReg( F19 ),
173 RegisterSaver_LiveFloatReg( F20 ),
174 RegisterSaver_LiveFloatReg( F21 ),
175 RegisterSaver_LiveFloatReg( F22 ),
176 RegisterSaver_LiveFloatReg( F23 ),
177 RegisterSaver_LiveFloatReg( F24 ),
178 RegisterSaver_LiveFloatReg( F25 ),
179 RegisterSaver_LiveFloatReg( F26 ),
180 RegisterSaver_LiveFloatReg( F27 ),
181 RegisterSaver_LiveFloatReg( F28 ),
182 RegisterSaver_LiveFloatReg( F29 ),
183 RegisterSaver_LiveFloatReg( F30 ),
184 RegisterSaver_LiveFloatReg( F31 ),
185 //
186 // live integer registers:
187 //
188 RegisterSaver_LiveIntReg( R0 ),
189 //RegisterSaver_LiveIntReg( R1 ), // stack pointer
190 RegisterSaver_LiveIntReg( R2 ),
191 RegisterSaver_LiveIntReg( R3 ),
192 RegisterSaver_LiveIntReg( R4 ),
193 RegisterSaver_LiveIntReg( R5 ),
194 RegisterSaver_LiveIntReg( R6 ),
195 RegisterSaver_LiveIntReg( R7 ),
196 RegisterSaver_LiveIntReg( R8 ),
197 RegisterSaver_LiveIntReg( R9 ),
198 RegisterSaver_LiveIntReg( R10 ),
199 RegisterSaver_LiveIntReg( R11 ),
200 RegisterSaver_LiveIntReg( R12 ),
201 //RegisterSaver_LiveIntReg( R13 ), // system thread id
202 RegisterSaver_LiveIntReg( R14 ),
203 RegisterSaver_LiveIntReg( R15 ),
204 RegisterSaver_LiveIntReg( R16 ),
205 RegisterSaver_LiveIntReg( R17 ),
206 RegisterSaver_LiveIntReg( R18 ),
207 RegisterSaver_LiveIntReg( R19 ),
208 RegisterSaver_LiveIntReg( R20 ),
209 RegisterSaver_LiveIntReg( R21 ),
210 RegisterSaver_LiveIntReg( R22 ),
211 RegisterSaver_LiveIntReg( R23 ),
212 RegisterSaver_LiveIntReg( R24 ),
213 RegisterSaver_LiveIntReg( R25 ),
214 RegisterSaver_LiveIntReg( R26 ),
215 RegisterSaver_LiveIntReg( R27 ),
216 RegisterSaver_LiveIntReg( R28 ),
217 RegisterSaver_LiveIntReg( R29 ),
218 RegisterSaver_LiveIntReg( R30 ),
219 RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below)
220 };
221
222 static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
223 //
224 // live vector registers (optional, only these ones are used by C2):
225 //
226 RegisterSaver_LiveVecReg( VR0 ),
227 RegisterSaver_LiveVecReg( VR1 ),
228 RegisterSaver_LiveVecReg( VR2 ),
229 RegisterSaver_LiveVecReg( VR3 ),
230 RegisterSaver_LiveVecReg( VR4 ),
231 RegisterSaver_LiveVecReg( VR5 ),
232 RegisterSaver_LiveVecReg( VR6 ),
233 RegisterSaver_LiveVecReg( VR7 ),
234 RegisterSaver_LiveVecReg( VR8 ),
235 RegisterSaver_LiveVecReg( VR9 ),
236 RegisterSaver_LiveVecReg( VR10 ),
237 RegisterSaver_LiveVecReg( VR11 ),
238 RegisterSaver_LiveVecReg( VR12 ),
239 RegisterSaver_LiveVecReg( VR13 ),
240 RegisterSaver_LiveVecReg( VR14 ),
241 RegisterSaver_LiveVecReg( VR15 ),
242 RegisterSaver_LiveVecReg( VR16 ),
243 RegisterSaver_LiveVecReg( VR17 ),
244 RegisterSaver_LiveVecReg( VR18 ),
245 RegisterSaver_LiveVecReg( VR19 ),
246 RegisterSaver_LiveVecReg( VR20 ),
247 RegisterSaver_LiveVecReg( VR21 ),
248 RegisterSaver_LiveVecReg( VR22 ),
249 RegisterSaver_LiveVecReg( VR23 ),
250 RegisterSaver_LiveVecReg( VR24 ),
251 RegisterSaver_LiveVecReg( VR25 ),
252 RegisterSaver_LiveVecReg( VR26 ),
253 RegisterSaver_LiveVecReg( VR27 ),
254 RegisterSaver_LiveVecReg( VR28 ),
255 RegisterSaver_LiveVecReg( VR29 ),
256 RegisterSaver_LiveVecReg( VR30 ),
257 RegisterSaver_LiveVecReg( VR31 )
258 };
259
260
261 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
262 int* out_frame_size_in_bytes,
263 bool generate_oop_map,
264 ReturnPCLocation return_pc_location,
265 bool save_vectors) {
266 // Push an abi_reg_args-frame and store all registers which may be live.
267 // If requested, create an OopMap: Record volatile registers as
268 // callee-save values in an OopMap so their save locations will be
269 // propagated to the RegisterMap of the caller frame during
270 // StackFrameStream construction (needed for deoptimization; see
271 // compiledVFrame::create_stack_value).
272 // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
273
274 // calculate frame size
275 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
276 sizeof(RegisterSaver::LiveRegType);
277 const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
278 sizeof(RegisterSaver::LiveRegType))
279 : 0;
280 const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
281 const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
282 + frame::native_abi_reg_args_size;
283
284 *out_frame_size_in_bytes = frame_size_in_bytes;
285 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
286 const int register_save_offset = frame_size_in_bytes - register_save_size;
287
288 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
289 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr;
290
291 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
292
293 // push a new frame
294 __ push_frame(frame_size_in_bytes, noreg);
295
296 // Save some registers in the last (non-vector) slots of the new frame so we
297 // can use them as scratch regs or to determine the return pc.
298 __ std(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
299 __ std(R30, frame_size_in_bytes - 2*reg_size - vecregstosave_num * vec_reg_size, R1_SP);
300
301 // save the flags
302 // Do the save_LR by hand and adjust the return pc if requested.
303 switch (return_pc_location) {
304 case return_pc_is_lr: __ mflr(R31); break;
305 case return_pc_is_pre_saved: break;
306 case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
307 default: ShouldNotReachHere();
308 }
309 if (return_pc_location != return_pc_is_pre_saved) {
310 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
311 }
312
313 // save all registers (ints and floats)
314 int offset = register_save_offset;
315
316 for (int i = 0; i < regstosave_num; i++) {
317 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
318 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
319
320 switch (reg_type) {
321 case RegisterSaver::int_reg: {
322 if (reg_num < 30) { // We spilled R30-31 right at the beginning.
323 __ std(as_Register(reg_num), offset, R1_SP);
324 }
325 break;
326 }
327 case RegisterSaver::float_reg: {
328 __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
329 break;
330 }
331 case RegisterSaver::special_reg: {
332 if (reg_num == SR_CTR.encoding()) {
333 __ mfctr(R30);
334 __ std(R30, offset, R1_SP);
335 } else {
336 Unimplemented();
337 }
338 break;
339 }
340 default:
341 ShouldNotReachHere();
342 }
343
344 if (generate_oop_map) {
345 map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
346 RegisterSaver_LiveRegs[i].vmreg);
347 }
348 offset += reg_size;
349 }
350
351 // Note that generate_oop_map in the following loop is only used for the
352 // polling_page_vectors_safepoint_handler_blob and the deopt_blob.
353 // The order in which the vector contents are stored depends on Endianess and
354 // the utilized instructions (PowerArchitecturePPC64).
355 assert(is_aligned(offset, StackAlignmentInBytes), "should be");
356 if (PowerArchitecturePPC64 >= 10) {
357 assert(is_even(vecregstosave_num), "expectation");
358 for (int i = 0; i < vecregstosave_num; i += 2) {
359 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
360 assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
361
362 __ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
363 // Note: The contents were read in the same order (see loadV16 node in ppc.ad).
364 // RegisterMap::pd_location only uses the first VMReg for each VectorRegister.
365 if (generate_oop_map) {
366 map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
367 RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
368 map->set_callee_saved(VMRegImpl::stack2reg((offset + vec_reg_size) >> 2),
369 RegisterSaver_LiveVecRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
370 }
371 offset += (2 * vec_reg_size);
372 }
373 } else {
374 for (int i = 0; i < vecregstosave_num; i++) {
375 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
376
377 __ stxv(as_VectorRegister(reg_num)->to_vsr(), offset, R1_SP);
378 // Note: The contents were read in the same order (see loadV16 node in ppc.ad).
379 // RegisterMap::pd_location only uses the first VMReg for each VectorRegister.
380 if (generate_oop_map) {
381 VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg;
382 map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr);
383 }
384 offset += vec_reg_size;
385 }
386 }
387
388 assert(offset == frame_size_in_bytes, "consistency check");
389
390 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
391
392 // And we're done.
393 return map;
394 }
395
396
397 // Pop the current frame and restore all the registers that we
398 // saved.
399 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
400 int frame_size_in_bytes,
401 bool restore_ctr,
402 bool save_vectors) {
403 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
404 sizeof(RegisterSaver::LiveRegType);
405 const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
406 sizeof(RegisterSaver::LiveRegType))
407 : 0;
408 const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
409
410 const int register_save_offset = frame_size_in_bytes - register_save_size;
411
412 BLOCK_COMMENT("restore_live_registers_and_pop_frame {");
413
414 // restore all registers (ints and floats)
415 int offset = register_save_offset;
416
417 for (int i = 0; i < regstosave_num; i++) {
418 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
419 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
420
421 switch (reg_type) {
422 case RegisterSaver::int_reg: {
423 if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
424 __ ld(as_Register(reg_num), offset, R1_SP);
425 break;
426 }
427 case RegisterSaver::float_reg: {
428 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
429 break;
430 }
431 case RegisterSaver::special_reg: {
432 if (reg_num == SR_CTR.encoding()) {
433 if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
434 __ ld(R31, offset, R1_SP);
435 __ mtctr(R31);
436 }
437 } else {
438 Unimplemented();
439 }
440 break;
441 }
442 default:
443 ShouldNotReachHere();
444 }
445 offset += reg_size;
446 }
447
448 assert(is_aligned(offset, StackAlignmentInBytes), "should be");
449 if (PowerArchitecturePPC64 >= 10) {
450 for (int i = 0; i < vecregstosave_num; i += 2) {
451 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
452 assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
453
454 __ lxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
455
456 offset += (2 * vec_reg_size);
457 }
458 } else {
459 for (int i = 0; i < vecregstosave_num; i++) {
460 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
461
462 __ lxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
463
464 offset += vec_reg_size;
465 }
466 }
467
468 assert(offset == frame_size_in_bytes, "consistency check");
469
470 // restore link and the flags
471 __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
472 __ mtlr(R31);
473
474 // restore scratch register's value
475 __ ld(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
476
477 // pop the frame
478 __ addi(R1_SP, R1_SP, frame_size_in_bytes);
479
480 BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
481 }
482
483 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
484 int frame_size,int total_args, const VMRegPair *regs,
485 const VMRegPair *regs2) {
486 __ push_frame(frame_size, r_temp);
487 int st_off = frame_size - wordSize;
488 for (int i = 0; i < total_args; i++) {
489 VMReg r_1 = regs[i].first();
490 VMReg r_2 = regs[i].second();
491 if (!r_1->is_valid()) {
492 assert(!r_2->is_valid(), "");
493 continue;
494 }
495 if (r_1->is_Register()) {
496 Register r = r_1->as_Register();
497 __ std(r, st_off, R1_SP);
498 st_off -= wordSize;
499 } else if (r_1->is_FloatRegister()) {
500 FloatRegister f = r_1->as_FloatRegister();
501 __ stfd(f, st_off, R1_SP);
502 st_off -= wordSize;
503 }
504 }
505 if (regs2 != nullptr) {
506 for (int i = 0; i < total_args; i++) {
507 VMReg r_1 = regs2[i].first();
508 VMReg r_2 = regs2[i].second();
509 if (!r_1->is_valid()) {
510 assert(!r_2->is_valid(), "");
511 continue;
512 }
513 if (r_1->is_Register()) {
514 Register r = r_1->as_Register();
515 __ std(r, st_off, R1_SP);
516 st_off -= wordSize;
517 } else if (r_1->is_FloatRegister()) {
518 FloatRegister f = r_1->as_FloatRegister();
519 __ stfd(f, st_off, R1_SP);
520 st_off -= wordSize;
521 }
522 }
523 }
524 }
525
526 void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm, int frame_size,
527 int total_args, const VMRegPair *regs,
528 const VMRegPair *regs2) {
529 int st_off = frame_size - wordSize;
530 for (int i = 0; i < total_args; i++) {
531 VMReg r_1 = regs[i].first();
532 VMReg r_2 = regs[i].second();
533 if (r_1->is_Register()) {
534 Register r = r_1->as_Register();
535 __ ld(r, st_off, R1_SP);
536 st_off -= wordSize;
537 } else if (r_1->is_FloatRegister()) {
538 FloatRegister f = r_1->as_FloatRegister();
539 __ lfd(f, st_off, R1_SP);
540 st_off -= wordSize;
541 }
542 }
543 if (regs2 != nullptr)
544 for (int i = 0; i < total_args; i++) {
545 VMReg r_1 = regs2[i].first();
546 VMReg r_2 = regs2[i].second();
547 if (r_1->is_Register()) {
548 Register r = r_1->as_Register();
549 __ ld(r, st_off, R1_SP);
550 st_off -= wordSize;
551 } else if (r_1->is_FloatRegister()) {
552 FloatRegister f = r_1->as_FloatRegister();
553 __ lfd(f, st_off, R1_SP);
554 st_off -= wordSize;
555 }
556 }
557 __ pop_frame();
558 }
559
560 // Restore the registers that might be holding a result.
561 void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors) {
562 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
563 sizeof(RegisterSaver::LiveRegType);
564 const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
565 sizeof(RegisterSaver::LiveRegType))
566 : 0;
567 const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
568
569 const int register_save_offset = frame_size_in_bytes - register_save_size;
570
571 // restore all result registers (ints and floats)
572 int offset = register_save_offset;
573 for (int i = 0; i < regstosave_num; i++) {
574 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
575 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
576 switch (reg_type) {
577 case RegisterSaver::int_reg: {
578 if (as_Register(reg_num)==R3_RET) // int result_reg
579 __ ld(as_Register(reg_num), offset, R1_SP);
580 break;
581 }
582 case RegisterSaver::float_reg: {
583 if (as_FloatRegister(reg_num)==F1_RET) // float result_reg
584 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
585 break;
586 }
587 case RegisterSaver::special_reg: {
588 // Special registers don't hold a result.
589 break;
590 }
591 default:
592 ShouldNotReachHere();
593 }
594 offset += reg_size;
595 }
596
597 assert(offset == frame_size_in_bytes - (save_vectors ? vecregstosave_num * vec_reg_size : 0), "consistency check");
598 }
599
600 // Is vector's size (in bytes) bigger than a size saved by default?
601 bool SharedRuntime::is_wide_vector(int size) {
602 // Note, MaxVectorSize == 8/16 on PPC64.
603 assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
604 return size > 8;
605 }
606
607 static int reg2slot(VMReg r) {
608 return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
609 }
610
611 static int reg2offset(VMReg r) {
612 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
613 }
614
615 // ---------------------------------------------------------------------------
616 // Read the array of BasicTypes from a signature, and compute where the
617 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
618 // quantities. Values less than VMRegImpl::stack0 are registers, those above
619 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
620 // as framesizes are fixed.
621 // VMRegImpl::stack0 refers to the first slot 0(sp).
622 // and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. Register
623 // up to Register::number_of_registers) are the 64-bit
624 // integer registers.
625
626 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
627 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
628 // units regardless of build. Of course for i486 there is no 64 bit build
629
630 // The Java calling convention is a "shifted" version of the C ABI.
631 // By skipping the first C ABI register we can call non-static jni methods
632 // with small numbers of arguments without having to shuffle the arguments
633 // at all. Since we control the java ABI we ought to at least get some
634 // advantage out of it.
635
636 const VMReg java_iarg_reg[8] = {
637 R3->as_VMReg(),
638 R4->as_VMReg(),
639 R5->as_VMReg(),
640 R6->as_VMReg(),
641 R7->as_VMReg(),
642 R8->as_VMReg(),
643 R9->as_VMReg(),
644 R10->as_VMReg()
645 };
646
647 const VMReg java_farg_reg[13] = {
648 F1->as_VMReg(),
649 F2->as_VMReg(),
650 F3->as_VMReg(),
651 F4->as_VMReg(),
652 F5->as_VMReg(),
653 F6->as_VMReg(),
654 F7->as_VMReg(),
655 F8->as_VMReg(),
656 F9->as_VMReg(),
657 F10->as_VMReg(),
658 F11->as_VMReg(),
659 F12->as_VMReg(),
660 F13->as_VMReg()
661 };
662
663 const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
664 const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
665
666 STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j);
667 STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j);
668
669 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
670 VMRegPair *regs,
671 int total_args_passed) {
672 // C2c calling conventions for compiled-compiled calls.
673 // Put 8 ints/longs into registers _AND_ 13 float/doubles into
674 // registers _AND_ put the rest on the stack.
675
676 const int inc_stk_for_intfloat = 1; // 1 slots for ints and floats
677 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
678
679 int i;
680 VMReg reg;
681 int stk = 0;
682 int ireg = 0;
683 int freg = 0;
684
685 // We put the first 8 arguments into registers and the rest on the
686 // stack, float arguments are already in their argument registers
687 // due to c2c calling conventions (see calling_convention).
688 for (int i = 0; i < total_args_passed; ++i) {
689 switch(sig_bt[i]) {
690 case T_BOOLEAN:
691 case T_CHAR:
692 case T_BYTE:
693 case T_SHORT:
694 case T_INT:
695 if (ireg < num_java_iarg_registers) {
696 // Put int/ptr in register
697 reg = java_iarg_reg[ireg];
698 ++ireg;
699 } else {
700 // Put int/ptr on stack.
701 reg = VMRegImpl::stack2reg(stk);
702 stk += inc_stk_for_intfloat;
703 }
704 regs[i].set1(reg);
705 break;
706 case T_LONG:
707 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
708 if (ireg < num_java_iarg_registers) {
709 // Put long in register.
710 reg = java_iarg_reg[ireg];
711 ++ireg;
712 } else {
713 // Put long on stack. They must be aligned to 2 slots.
714 if (stk & 0x1) ++stk;
715 reg = VMRegImpl::stack2reg(stk);
716 stk += inc_stk_for_longdouble;
717 }
718 regs[i].set2(reg);
719 break;
720 case T_OBJECT:
721 case T_ARRAY:
722 case T_ADDRESS:
723 if (ireg < num_java_iarg_registers) {
724 // Put ptr in register.
725 reg = java_iarg_reg[ireg];
726 ++ireg;
727 } else {
728 // Put ptr on stack. Objects must be aligned to 2 slots too,
729 // because "64-bit pointers record oop-ishness on 2 aligned
730 // adjacent registers." (see OopFlow::build_oop_map).
731 if (stk & 0x1) ++stk;
732 reg = VMRegImpl::stack2reg(stk);
733 stk += inc_stk_for_longdouble;
734 }
735 regs[i].set2(reg);
736 break;
737 case T_FLOAT:
738 if (freg < num_java_farg_registers) {
739 // Put float in register.
740 reg = java_farg_reg[freg];
741 ++freg;
742 } else {
743 // Put float on stack.
744 reg = VMRegImpl::stack2reg(stk);
745 stk += inc_stk_for_intfloat;
746 }
747 regs[i].set1(reg);
748 break;
749 case T_DOUBLE:
750 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
751 if (freg < num_java_farg_registers) {
752 // Put double in register.
753 reg = java_farg_reg[freg];
754 ++freg;
755 } else {
756 // Put double on stack. They must be aligned to 2 slots.
757 if (stk & 0x1) ++stk;
758 reg = VMRegImpl::stack2reg(stk);
759 stk += inc_stk_for_longdouble;
760 }
761 regs[i].set2(reg);
762 break;
763 case T_VOID:
764 // Do not count halves.
765 regs[i].set_bad();
766 break;
767 default:
768 ShouldNotReachHere();
769 }
770 }
771 return stk;
772 }
773
774 // Calling convention for calling C code.
775 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
776 VMRegPair *regs,
777 int total_args_passed) {
778 // Calling conventions for C runtime calls and calls to JNI native methods.
779 //
780 // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
781 // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
782 // the first 13 flt/dbl's in the first 13 fp regs but additionally
783 // copy flt/dbl to the stack if they are beyond the 8th argument.
784
785 const VMReg iarg_reg[8] = {
786 R3->as_VMReg(),
787 R4->as_VMReg(),
788 R5->as_VMReg(),
789 R6->as_VMReg(),
790 R7->as_VMReg(),
791 R8->as_VMReg(),
792 R9->as_VMReg(),
793 R10->as_VMReg()
794 };
795
796 const VMReg farg_reg[13] = {
797 F1->as_VMReg(),
798 F2->as_VMReg(),
799 F3->as_VMReg(),
800 F4->as_VMReg(),
801 F5->as_VMReg(),
802 F6->as_VMReg(),
803 F7->as_VMReg(),
804 F8->as_VMReg(),
805 F9->as_VMReg(),
806 F10->as_VMReg(),
807 F11->as_VMReg(),
808 F12->as_VMReg(),
809 F13->as_VMReg()
810 };
811
812 // Check calling conventions consistency.
813 assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
814 sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
815 "consistency");
816
817 const int additional_frame_header_slots = ((frame::native_abi_minframe_size - frame::jit_out_preserve_size)
818 / VMRegImpl::stack_slot_size);
819 const int float_offset_in_slots = Argument::float_on_stack_offset_in_bytes_c / VMRegImpl::stack_slot_size;
820
821 VMReg reg;
822 int arg = 0;
823 int freg = 0;
824 bool stack_used = false;
825
826 for (int i = 0; i < total_args_passed; ++i, ++arg) {
827 // Each argument corresponds to a slot in the Parameter Save Area (if not omitted)
828 int stk = (arg * 2) + additional_frame_header_slots;
829
830 switch(sig_bt[i]) {
831 //
832 // If arguments 0-7 are integers, they are passed in integer registers.
833 // Argument i is placed in iarg_reg[i].
834 //
835 case T_BOOLEAN:
836 case T_CHAR:
837 case T_BYTE:
838 case T_SHORT:
839 case T_INT:
840 // We must cast ints to longs and use full 64 bit stack slots
841 // here. Thus fall through, handle as long.
842 case T_LONG:
843 case T_OBJECT:
844 case T_ARRAY:
845 case T_ADDRESS:
846 case T_METADATA:
847 // Oops are already boxed if required (JNI).
848 if (arg < Argument::n_int_register_parameters_c) {
849 reg = iarg_reg[arg];
850 } else {
851 reg = VMRegImpl::stack2reg(stk);
852 stack_used = true;
853 }
854 regs[i].set2(reg);
855 break;
856
857 //
858 // Floats are treated differently from int regs: The first 13 float arguments
859 // are passed in registers (not the float args among the first 13 args).
860 // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
861 // in farg_reg[j] if argument i is the j-th float argument of this call.
862 //
863 case T_FLOAT:
864 if (freg < Argument::n_float_register_parameters_c) {
865 // Put float in register ...
866 reg = farg_reg[freg];
867 ++freg;
868 } else {
869 // Put float on stack.
870 reg = VMRegImpl::stack2reg(stk + float_offset_in_slots);
871 stack_used = true;
872 }
873 regs[i].set1(reg);
874 break;
875 case T_DOUBLE:
876 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
877 if (freg < Argument::n_float_register_parameters_c) {
878 // Put double in register ...
879 reg = farg_reg[freg];
880 ++freg;
881 } else {
882 // Put double on stack.
883 reg = VMRegImpl::stack2reg(stk);
884 stack_used = true;
885 }
886 regs[i].set2(reg);
887 break;
888
889 case T_VOID:
890 // Do not count halves.
891 regs[i].set_bad();
892 --arg;
893 break;
894 default:
895 ShouldNotReachHere();
896 }
897 }
898
899 // Return size of the stack frame excluding the jit_out_preserve part in single-word slots.
900 #if defined(ABI_ELFv2)
901 assert(additional_frame_header_slots == 0, "ABIv2 shouldn't use extra slots");
902 // ABIv2 allows omitting the Parameter Save Area if the callee's prototype
903 // indicates that all parameters can be passed in registers.
904 return stack_used ? (arg * 2) : 0;
905 #else
906 // The Parameter Save Area needs to be at least 8 double-word slots for ABIv1.
907 // We have to add extra slots because ABIv1 uses a larger header.
908 return MAX2(arg, 8) * 2 + additional_frame_header_slots;
909 #endif
910 }
911
912 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
913 uint num_bits,
914 uint total_args_passed) {
915 Unimplemented();
916 return 0;
917 }
918
919 static address gen_c2i_adapter(MacroAssembler *masm,
920 int comp_args_on_stack,
921 const GrowableArray<SigEntry>* sig,
922 const VMRegPair *regs,
923 Label& call_interpreter,
924 const Register& ientry) {
925
926 address c2i_entrypoint;
927
928 const Register sender_SP = R21_sender_SP; // == R21_tmp1
929 const Register code = R22_tmp2;
930 //const Register ientry = R23_tmp3;
931 const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
932 const int num_value_regs = sizeof(value_regs) / sizeof(Register);
933 int value_regs_index = 0;
934 int total_args_passed = sig->length();
935
936 const Register return_pc = R27_tmp7;
937 const Register tmp = R28_tmp8;
938
939 assert_different_registers(sender_SP, code, ientry, return_pc, tmp);
940
941 // Adapter needs TOP_IJAVA_FRAME_ABI.
942 const int adapter_size = frame::top_ijava_frame_abi_size +
943 align_up(total_args_passed * wordSize, frame::alignment_in_bytes);
944
945 // regular (verified) c2i entry point
946 c2i_entrypoint = __ pc();
947
948 // Does compiled code exists? If yes, patch the caller's callsite.
949 __ ld(code, method_(code));
950 __ cmpdi(CR0, code, 0);
951 __ ld(ientry, method_(interpreter_entry)); // preloaded
952 __ beq(CR0, call_interpreter);
953
954
955 // Patch caller's callsite, method_(code) was not null which means that
956 // compiled code exists.
957 __ mflr(return_pc);
958 __ std(return_pc, _abi0(lr), R1_SP);
959 RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);
960
961 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);
962
963 RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
964 __ ld(return_pc, _abi0(lr), R1_SP);
965 __ ld(ientry, method_(interpreter_entry)); // preloaded
966 __ mtlr(return_pc);
967
968
969 // Call the interpreter.
970 __ BIND(call_interpreter);
971 __ mtctr(ientry);
972
973 // Get a copy of the current SP for loading caller's arguments.
974 __ mr(sender_SP, R1_SP);
975
976 // Add space for the adapter.
977 __ resize_frame(-adapter_size, R12_scratch2);
978
979 int st_off = adapter_size - wordSize;
980
981 // Write the args into the outgoing interpreter space.
982 for (int i = 0; i < total_args_passed; i++) {
983 BasicType bt = sig->at(i)._bt;
984
985 VMReg r_1 = regs[i].first();
986 VMReg r_2 = regs[i].second();
987 if (!r_1->is_valid()) {
988 assert(!r_2->is_valid(), "");
989 continue;
990 }
991 if (r_1->is_stack()) {
992 Register tmp_reg = value_regs[value_regs_index];
993 value_regs_index = (value_regs_index + 1) % num_value_regs;
994 // The calling convention produces OptoRegs that ignore the out
995 // preserve area (JIT's ABI). We must account for it here.
996 int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
997 if (!r_2->is_valid()) {
998 __ lwz(tmp_reg, ld_off, sender_SP);
999 } else {
1000 __ ld(tmp_reg, ld_off, sender_SP);
1001 }
1002 // Pretend stack targets were loaded into tmp_reg.
1003 r_1 = tmp_reg->as_VMReg();
1004 }
1005
1006 if (r_1->is_Register()) {
1007 Register r = r_1->as_Register();
1008 if (!r_2->is_valid()) {
1009 __ stw(r, st_off, R1_SP);
1010 st_off-=wordSize;
1011 } else {
1012 // Longs are given 2 64-bit slots in the interpreter, but the
1013 // data is passed in only 1 slot.
1014 if (bt == T_LONG || bt == T_DOUBLE) {
1015 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
1016 st_off-=wordSize;
1017 }
1018 __ std(r, st_off, R1_SP);
1019 st_off-=wordSize;
1020 }
1021 } else {
1022 assert(r_1->is_FloatRegister(), "");
1023 FloatRegister f = r_1->as_FloatRegister();
1024 if (!r_2->is_valid()) {
1025 __ stfs(f, st_off, R1_SP);
1026 st_off-=wordSize;
1027 } else {
1028 // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
1029 // data is passed in only 1 slot.
1030 // One of these should get known junk...
1031 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
1032 st_off-=wordSize;
1033 __ stfd(f, st_off, R1_SP);
1034 st_off-=wordSize;
1035 }
1036 }
1037 }
1038
1039 // Jump to the interpreter just as if interpreter was doing it.
1040
1041 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
1042
1043 // load TOS
1044 __ addi(R15_esp, R1_SP, st_off);
1045
1046 // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
1047 assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
1048 __ bctr();
1049
1050 return c2i_entrypoint;
1051 }
1052
1053 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
1054
1055 // Load method's entry-point from method.
1056 __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
1057 __ mtctr(R12_scratch2);
1058
1059 // We will only enter here from an interpreted frame and never from after
1060 // passing thru a c2i. Azul allowed this but we do not. If we lose the
1061 // race and use a c2i we will remain interpreted for the race loser(s).
1062 // This removes all sorts of headaches on the x86 side and also eliminates
1063 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
1064
1065 // Note: r13 contains the senderSP on entry. We must preserve it since
1066 // we may do a i2c -> c2i transition if we lose a race where compiled
1067 // code goes non-entrant while we get args ready.
1068 // In addition we use r13 to locate all the interpreter args as
1069 // we must align the stack to 16 bytes on an i2c entry else we
1070 // lose alignment we expect in all compiled code and register
1071 // save code can segv when fxsave instructions find improperly
1072 // aligned stack pointer.
1073
1074 const Register ld_ptr = R15_esp;
1075 const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
1076 const int num_value_regs = sizeof(value_regs) / sizeof(Register);
1077 int value_regs_index = 0;
1078
1079 int total_args_passed = sig->length();
1080 int ld_offset = total_args_passed*wordSize;
1081
1082 // Cut-out for having no stack args. Since up to 2 int/oop args are passed
1083 // in registers, we will occasionally have no stack args.
1084 int comp_words_on_stack = 0;
1085 if (comp_args_on_stack) {
1086 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
1087 // registers are below. By subtracting stack0, we either get a negative
1088 // number (all values in registers) or the maximum stack slot accessed.
1089
1090 // Convert 4-byte c2 stack slots to words.
1091 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1092 // Round up to miminum stack alignment, in wordSize.
1093 comp_words_on_stack = align_up(comp_words_on_stack, 2);
1094 __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
1095 }
1096
1097 // Now generate the shuffle code. Pick up all register args and move the
1098 // rest through register value=Z_R12.
1099 BLOCK_COMMENT("Shuffle arguments");
1100
1101 for (int i = 0; i < total_args_passed; i++) {
1102 BasicType bt = sig->at(i)._bt;
1103 if (bt == T_VOID) {
1104 assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
1105 continue;
1106 }
1107
1108 // Pick up 0, 1 or 2 words from ld_ptr.
1109 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1110 "scrambled load targets?");
1111 VMReg r_1 = regs[i].first();
1112 VMReg r_2 = regs[i].second();
1113 if (!r_1->is_valid()) {
1114 assert(!r_2->is_valid(), "");
1115 continue;
1116 }
1117 if (r_1->is_FloatRegister()) {
1118 if (!r_2->is_valid()) {
1119 __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
1120 ld_offset-=wordSize;
1121 } else {
1122 // Skip the unused interpreter slot.
1123 __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
1124 ld_offset-=2*wordSize;
1125 }
1126 } else {
1127 Register r;
1128 if (r_1->is_stack()) {
1129 // Must do a memory to memory move thru "value".
1130 r = value_regs[value_regs_index];
1131 value_regs_index = (value_regs_index + 1) % num_value_regs;
1132 } else {
1133 r = r_1->as_Register();
1134 }
1135 if (!r_2->is_valid()) {
1136 // Not sure we need to do this but it shouldn't hurt.
1137 if (is_reference_type(bt) || bt == T_ADDRESS) {
1138 __ ld(r, ld_offset, ld_ptr);
1139 ld_offset-=wordSize;
1140 } else {
1141 __ lwz(r, ld_offset, ld_ptr);
1142 ld_offset-=wordSize;
1143 }
1144 } else {
1145 // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
1146 // data is passed in only 1 slot.
1147 if (bt == T_LONG || bt == T_DOUBLE) {
1148 ld_offset-=wordSize;
1149 }
1150 __ ld(r, ld_offset, ld_ptr);
1151 ld_offset-=wordSize;
1152 }
1153
1154 if (r_1->is_stack()) {
1155 // Now store value where the compiler expects it
1156 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;
1157
1158 if (bt == T_INT || bt == T_FLOAT || bt == T_BOOLEAN ||
1159 bt == T_SHORT || bt == T_CHAR || bt == T_BYTE) {
1160 __ stw(r, st_off, R1_SP);
1161 } else {
1162 __ std(r, st_off, R1_SP);
1163 }
1164 }
1165 }
1166 }
1167
1168 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1169
1170 BLOCK_COMMENT("Store method");
1171 // Store method into thread->callee_target.
1172 // We might end up in handle_wrong_method if the callee is
1173 // deoptimized as we race thru here. If that happens we don't want
1174 // to take a safepoint because the caller frame will look
1175 // interpreted and arguments are now "compiled" so it is much better
1176 // to make this transition invisible to the stack walking
1177 // code. Unfortunately if we try and find the callee by normal means
1178 // a safepoint is possible. So we stash the desired callee in the
1179 // thread and the vm will find there should this case occur.
1180 __ std(R19_method, thread_(callee_target));
1181
1182 // Jump to the compiled code just as if compiled code was doing it.
1183 __ bctr();
1184 }
1185
1186 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1187 int comp_args_on_stack,
1188 const GrowableArray<SigEntry>* sig,
1189 const VMRegPair* regs,
1190 const GrowableArray<SigEntry>* sig_cc,
1191 const VMRegPair* regs_cc,
1192 const GrowableArray<SigEntry>* sig_cc_ro,
1193 const VMRegPair* regs_cc_ro,
1194 address entry_address[AdapterBlob::ENTRY_COUNT],
1195 AdapterBlob*& new_adapter,
1196 bool allocate_code_blob) {
1197 // entry: i2c
1198
1199 __ align(CodeEntryAlignment);
1200 entry_address[AdapterBlob::I2C] = __ pc();
1201 gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1202
1203
1204 // entry: c2i unverified
1205
1206 __ align(CodeEntryAlignment);
1207 BLOCK_COMMENT("c2i unverified entry");
1208 entry_address[AdapterBlob::C2I_Unverified] = __ pc();
1209
1210 // inline_cache contains a CompiledICData
1211 const Register ic = R19_inline_cache_reg;
1212 const Register ic_klass = R11_scratch1;
1213 const Register receiver_klass = R12_scratch2;
1214 const Register code = R21_tmp1;
1215 const Register ientry = R23_tmp3;
1216
1217 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
1218 assert(R11_scratch1 == R11, "need prologue scratch register");
1219
1220 Label call_interpreter;
1221
1222 __ ic_check(4 /* end_alignment */);
1223 __ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
1224 // Argument is valid and klass is as expected, continue.
1225
1226 __ ld(code, method_(code));
1227 __ cmpdi(CR0, code, 0);
1228 __ ld(ientry, method_(interpreter_entry)); // preloaded
1229 __ beq_predict_taken(CR0, call_interpreter);
1230
1231 // Branch to ic_miss_stub.
1232 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1233
1234 // entry: c2i
1235
1236 entry_address[AdapterBlob::C2I] = __ pc();
1237
1238 // Class initialization barrier for static methods
1239 entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
1240 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
1241 Label L_skip_barrier;
1242
1243 // Bypass the barrier for non-static methods
1244 __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
1245 __ andi_(R0, R0, JVM_ACC_STATIC);
1246 __ beq(CR0, L_skip_barrier); // non-static
1247
1248 Register klass = R11_scratch1;
1249 __ load_method_holder(klass, R19_method);
1250 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
1251
1252 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
1253 __ mtctr(klass);
1254 __ bctr();
1255
1256 __ bind(L_skip_barrier);
1257 entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
1258
1259 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1260 bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
1261
1262 gen_c2i_adapter(masm, comp_args_on_stack, sig, regs, call_interpreter, ientry);
1263 return;
1264 }
1265
1266 // An oop arg. Must pass a handle not the oop itself.
1267 static void object_move(MacroAssembler* masm,
1268 int frame_size_in_slots,
1269 OopMap* oop_map, int oop_handle_offset,
1270 bool is_receiver, int* receiver_offset,
1271 VMRegPair src, VMRegPair dst,
1272 Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1273 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1274 "receiver has already been moved");
1275
1276 // We must pass a handle. First figure out the location we use as a handle.
1277
1278 if (src.first()->is_stack()) {
1279 // stack to stack or reg
1280
1281 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1282 Label skip;
1283 const int oop_slot_in_callers_frame = reg2slot(src.first());
1284
1285 guarantee(!is_receiver, "expecting receiver in register");
1286 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));
1287
1288 __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
1289 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
1290 __ cmpdi(CR0, r_temp_2, 0);
1291 __ bne(CR0, skip);
1292 // Use a null handle if oop is null.
1293 __ li(r_handle, 0);
1294 __ bind(skip);
1295
1296 if (dst.first()->is_stack()) {
1297 // stack to stack
1298 __ std(r_handle, reg2offset(dst.first()), R1_SP);
1299 } else {
1300 // stack to reg
1301 // Nothing to do, r_handle is already the dst register.
1302 }
1303 } else {
1304 // reg to stack or reg
1305 const Register r_oop = src.first()->as_Register();
1306 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1307 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
1308 + oop_handle_offset; // in slots
1309 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
1310 Label skip;
1311
1312 if (is_receiver) {
1313 *receiver_offset = oop_offset;
1314 }
1315 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));
1316
1317 __ std( r_oop, oop_offset, R1_SP);
1318 __ addi(r_handle, R1_SP, oop_offset);
1319
1320 __ cmpdi(CR0, r_oop, 0);
1321 __ bne(CR0, skip);
1322 // Use a null handle if oop is null.
1323 __ li(r_handle, 0);
1324 __ bind(skip);
1325
1326 if (dst.first()->is_stack()) {
1327 // reg to stack
1328 __ std(r_handle, reg2offset(dst.first()), R1_SP);
1329 } else {
1330 // reg to reg
1331 // Nothing to do, r_handle is already the dst register.
1332 }
1333 }
1334 }
1335
1336 static void int_move(MacroAssembler*masm,
1337 VMRegPair src, VMRegPair dst,
1338 Register r_caller_sp, Register r_temp) {
1339 assert(src.first()->is_valid(), "incoming must be int");
1340 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
1341
1342 if (src.first()->is_stack()) {
1343 if (dst.first()->is_stack()) {
1344 // stack to stack
1345 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
1346 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1347 } else {
1348 // stack to reg
1349 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
1350 }
1351 } else if (dst.first()->is_stack()) {
1352 // reg to stack
1353 __ extsw(r_temp, src.first()->as_Register());
1354 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1355 } else {
1356 // reg to reg
1357 __ extsw(dst.first()->as_Register(), src.first()->as_Register());
1358 }
1359 }
1360
1361 static void long_move(MacroAssembler*masm,
1362 VMRegPair src, VMRegPair dst,
1363 Register r_caller_sp, Register r_temp) {
1364 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
1365 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
1366
1367 if (src.first()->is_stack()) {
1368 if (dst.first()->is_stack()) {
1369 // stack to stack
1370 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
1371 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1372 } else {
1373 // stack to reg
1374 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
1375 }
1376 } else if (dst.first()->is_stack()) {
1377 // reg to stack
1378 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
1379 } else {
1380 // reg to reg
1381 if (dst.first()->as_Register() != src.first()->as_Register())
1382 __ mr(dst.first()->as_Register(), src.first()->as_Register());
1383 }
1384 }
1385
1386 static void float_move(MacroAssembler*masm,
1387 VMRegPair src, VMRegPair dst,
1388 Register r_caller_sp, Register r_temp) {
1389 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
1390 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");
1391
1392 if (src.first()->is_stack()) {
1393 if (dst.first()->is_stack()) {
1394 // stack to stack
1395 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
1396 __ stw(r_temp, reg2offset(dst.first()), R1_SP);
1397 } else {
1398 // stack to reg
1399 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
1400 }
1401 } else if (dst.first()->is_stack()) {
1402 // reg to stack
1403 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
1404 } else {
1405 // reg to reg
1406 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
1407 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1408 }
1409 }
1410
1411 static void double_move(MacroAssembler*masm,
1412 VMRegPair src, VMRegPair dst,
1413 Register r_caller_sp, Register r_temp) {
1414 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
1415 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");
1416
1417 if (src.first()->is_stack()) {
1418 if (dst.first()->is_stack()) {
1419 // stack to stack
1420 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
1421 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1422 } else {
1423 // stack to reg
1424 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
1425 }
1426 } else if (dst.first()->is_stack()) {
1427 // reg to stack
1428 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
1429 } else {
1430 // reg to reg
1431 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
1432 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1433 }
1434 }
1435
1436 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1437 switch (ret_type) {
1438 case T_BOOLEAN:
1439 case T_CHAR:
1440 case T_BYTE:
1441 case T_SHORT:
1442 case T_INT:
1443 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1444 break;
1445 case T_ARRAY:
1446 case T_OBJECT:
1447 case T_LONG:
1448 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1449 break;
1450 case T_FLOAT:
1451 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1452 break;
1453 case T_DOUBLE:
1454 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1455 break;
1456 case T_VOID:
1457 break;
1458 default:
1459 ShouldNotReachHere();
1460 break;
1461 }
1462 }
1463
1464 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1465 switch (ret_type) {
1466 case T_BOOLEAN:
1467 case T_CHAR:
1468 case T_BYTE:
1469 case T_SHORT:
1470 case T_INT:
1471 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1472 break;
1473 case T_ARRAY:
1474 case T_OBJECT:
1475 case T_LONG:
1476 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1477 break;
1478 case T_FLOAT:
1479 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1480 break;
1481 case T_DOUBLE:
1482 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1483 break;
1484 case T_VOID:
1485 break;
1486 default:
1487 ShouldNotReachHere();
1488 break;
1489 }
1490 }
1491
1492 static void verify_oop_args(MacroAssembler* masm,
1493 const methodHandle& method,
1494 const BasicType* sig_bt,
1495 const VMRegPair* regs) {
1496 Register temp_reg = R19_method; // not part of any compiled calling seq
1497 if (VerifyOops) {
1498 for (int i = 0; i < method->size_of_parameters(); i++) {
1499 if (is_reference_type(sig_bt[i])) {
1500 VMReg r = regs[i].first();
1501 assert(r->is_valid(), "bad oop arg");
1502 if (r->is_stack()) {
1503 __ ld(temp_reg, reg2offset(r), R1_SP);
1504 __ verify_oop(temp_reg, FILE_AND_LINE);
1505 } else {
1506 __ verify_oop(r->as_Register(), FILE_AND_LINE);
1507 }
1508 }
1509 }
1510 }
1511 }
1512
1513 static void gen_special_dispatch(MacroAssembler* masm,
1514 const methodHandle& method,
1515 const BasicType* sig_bt,
1516 const VMRegPair* regs) {
1517 verify_oop_args(masm, method, sig_bt, regs);
1518 vmIntrinsics::ID iid = method->intrinsic_id();
1519
1520 // Now write the args into the outgoing interpreter space
1521 bool has_receiver = false;
1522 Register receiver_reg = noreg;
1523 int member_arg_pos = -1;
1524 Register member_reg = noreg;
1525 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1526 if (ref_kind != 0) {
1527 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1528 member_reg = R19_method; // known to be free at this point
1529 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1530 } else if (iid == vmIntrinsics::_invokeBasic) {
1531 has_receiver = true;
1532 } else if (iid == vmIntrinsics::_linkToNative) {
1533 member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
1534 member_reg = R19_method; // known to be free at this point
1535 } else {
1536 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1537 }
1538
1539 if (member_reg != noreg) {
1540 // Load the member_arg into register, if necessary.
1541 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1542 VMReg r = regs[member_arg_pos].first();
1543 if (r->is_stack()) {
1544 __ ld(member_reg, reg2offset(r), R1_SP);
1545 } else {
1546 // no data motion is needed
1547 member_reg = r->as_Register();
1548 }
1549 }
1550
1551 if (has_receiver) {
1552 // Make sure the receiver is loaded into a register.
1553 assert(method->size_of_parameters() > 0, "oob");
1554 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1555 VMReg r = regs[0].first();
1556 assert(r->is_valid(), "bad receiver arg");
1557 if (r->is_stack()) {
1558 // Porting note: This assumes that compiled calling conventions always
1559 // pass the receiver oop in a register. If this is not true on some
1560 // platform, pick a temp and load the receiver from stack.
1561 fatal("receiver always in a register");
1562 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point?
1563 __ ld(receiver_reg, reg2offset(r), R1_SP);
1564 } else {
1565 // no data motion is needed
1566 receiver_reg = r->as_Register();
1567 }
1568 }
1569
1570 // Figure out which address we are really jumping to:
1571 MethodHandles::generate_method_handle_dispatch(masm, iid,
1572 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1573 }
1574
1575 //---------------------------- continuation_enter_setup ---------------------------
1576 //
1577 // Frame setup.
1578 //
1579 // Arguments:
1580 // None.
1581 //
1582 // Results:
1583 // R1_SP: pointer to blank ContinuationEntry in the pushed frame.
1584 //
1585 // Kills:
1586 // R0, R20
1587 //
1588 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) {
1589 assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1590 assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
1591 assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1592
1593 const int frame_size_in_bytes = (int)ContinuationEntry::size();
1594 assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error");
1595
1596 framesize_words = frame_size_in_bytes / wordSize;
1597
1598 DEBUG_ONLY(__ block_comment("setup {"));
1599 // Save return pc and push entry frame
1600 const Register return_pc = R20;
1601 __ mflr(return_pc);
1602 __ std(return_pc, _abi0(lr), R1_SP); // SP->lr = return_pc
1603 __ push_frame(frame_size_in_bytes , R0); // SP -= frame_size_in_bytes
1604
1605 OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
1606
1607 __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread);
1608 __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1609 __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP);
1610 DEBUG_ONLY(__ block_comment("} setup"));
1611
1612 return map;
1613 }
1614
1615 //---------------------------- fill_continuation_entry ---------------------------
1616 //
1617 // Initialize the new ContinuationEntry.
1618 //
1619 // Arguments:
1620 // R1_SP: pointer to blank Continuation entry
1621 // reg_cont_obj: pointer to the continuation
1622 // reg_flags: flags
1623 //
1624 // Results:
1625 // R1_SP: pointer to filled out ContinuationEntry
1626 //
1627 // Kills:
1628 // R8_ARG6, R9_ARG7, R10_ARG8
1629 //
1630 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) {
1631 assert_different_registers(reg_cont_obj, reg_flags);
1632 Register zero = R8_ARG6;
1633 Register tmp2 = R9_ARG7;
1634
1635 DEBUG_ONLY(__ block_comment("fill {"));
1636 #ifdef ASSERT
1637 __ load_const_optimized(tmp2, ContinuationEntry::cookie_value());
1638 __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP);
1639 #endif //ASSERT
1640
1641 __ li(zero, 0);
1642 __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP);
1643 __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
1644 __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP);
1645 __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP);
1646 __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP);
1647
1648 __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread);
1649 __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
1650
1651 __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread);
1652 DEBUG_ONLY(__ block_comment("} fill"));
1653 }
1654
1655 //---------------------------- continuation_enter_cleanup ---------------------------
1656 //
1657 // Copy corresponding attributes from the top ContinuationEntry to the JavaThread
1658 // before deleting it.
1659 //
1660 // Arguments:
1661 // R1_SP: pointer to the ContinuationEntry
1662 //
1663 // Results:
1664 // None.
1665 //
1666 // Kills:
1667 // R8_ARG6, R9_ARG7, R10_ARG8, R15_esp
1668 //
1669 static void continuation_enter_cleanup(MacroAssembler* masm) {
1670 Register tmp1 = R8_ARG6;
1671 Register tmp2 = R9_ARG7;
1672
1673 #ifdef ASSERT
1674 __ block_comment("clean {");
1675 __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
1676 __ cmpd(CR0, R1_SP, tmp1);
1677 __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
1678 #endif
1679
1680 __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
1681 __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);
1682 __ ld_ptr(tmp2, ContinuationEntry::parent_offset(), R1_SP);
1683 __ st_ptr(tmp2, JavaThread::cont_entry_offset(), R16_thread);
1684 DEBUG_ONLY(__ block_comment("} clean"));
1685 }
1686
1687 static void check_continuation_enter_argument(VMReg actual_vmreg,
1688 Register expected_reg,
1689 const char* name) {
1690 assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name);
1691 assert(actual_vmreg->as_Register() == expected_reg,
1692 "%s is in unexpected register: %s instead of %s",
1693 name, actual_vmreg->as_Register()->name(), expected_reg->name());
1694 }
1695
1696 static void gen_continuation_enter(MacroAssembler* masm,
1697 const VMRegPair* regs,
1698 int& exception_offset,
1699 OopMapSet* oop_maps,
1700 int& frame_complete,
1701 int& framesize_words,
1702 int& interpreted_entry_offset,
1703 int& compiled_entry_offset) {
1704
1705 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1706 int pos_cont_obj = 0;
1707 int pos_is_cont = 1;
1708 int pos_is_virtual = 2;
1709
1710 // The platform-specific calling convention may present the arguments in various registers.
1711 // To simplify the rest of the code, we expect the arguments to reside at these known
1712 // registers, and we additionally check the placement here in case calling convention ever
1713 // changes.
1714 Register reg_cont_obj = R3_ARG1;
1715 Register reg_is_cont = R4_ARG2;
1716 Register reg_is_virtual = R5_ARG3;
1717
1718 check_continuation_enter_argument(regs[pos_cont_obj].first(), reg_cont_obj, "Continuation object");
1719 check_continuation_enter_argument(regs[pos_is_cont].first(), reg_is_cont, "isContinue");
1720 check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread");
1721
1722 address resolve_static_call = SharedRuntime::get_resolve_static_call_stub();
1723
1724 address start = __ pc();
1725
1726 Label L_thaw, L_exit;
1727
1728 // i2i entry used at interp_only_mode only
1729 interpreted_entry_offset = __ pc() - start;
1730 {
1731 #ifdef ASSERT
1732 Label is_interp_only;
1733 __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
1734 __ cmpwi(CR0, R0, 0);
1735 __ bne(CR0, is_interp_only);
1736 __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1737 __ bind(is_interp_only);
1738 #endif
1739
1740 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1741 __ ld(reg_cont_obj, Interpreter::stackElementSize*3, R15_esp);
1742 __ lwz(reg_is_cont, Interpreter::stackElementSize*2, R15_esp);
1743 __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp);
1744
1745 __ push_cont_fastpath();
1746
1747 OopMap* map = continuation_enter_setup(masm, framesize_words);
1748
1749 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
1750 // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.
1751
1752 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1753
1754 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1755 __ cmpwi(CR0, reg_is_cont, 0);
1756 __ bne(CR0, L_thaw);
1757
1758 // --- call Continuation.enter(Continuation c, boolean isContinue)
1759
1760 // Emit compiled static call. The call will be always resolved to the c2i
1761 // entry of Continuation.enter(Continuation c, boolean isContinue).
1762 // There are special cases in SharedRuntime::resolve_static_call_C() and
1763 // SharedRuntime::resolve_sub_helper_internal() to achieve this
1764 // See also corresponding call below.
1765 address c2i_call_pc = __ pc();
1766 int start_offset = __ offset();
1767 // Put the entry point as a constant into the constant pool.
1768 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
1769 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
1770 guarantee(entry_point_toc_addr != nullptr, "const section overflow");
1771
1772 // Emit the trampoline stub which will be related to the branch-and-link below.
1773 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
1774 guarantee(stub != nullptr, "no space for trampoline stub");
1775
1776 __ relocate(relocInfo::static_call_type);
1777 // Note: At this point we do not have the address of the trampoline
1778 // stub, and the entry point might be too far away for bl, so __ pc()
1779 // serves as dummy and the bl will be patched later.
1780 __ bl(__ pc());
1781 oop_maps->add_gc_map(__ pc() - start, map);
1782 __ post_call_nop();
1783
1784 __ b(L_exit);
1785
1786 // static stub for the call above
1787 stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc);
1788 guarantee(stub != nullptr, "no space for static stub");
1789 }
1790
1791 // compiled entry
1792 __ align(CodeEntryAlignment);
1793 compiled_entry_offset = __ pc() - start;
1794
1795 OopMap* map = continuation_enter_setup(masm, framesize_words);
1796
1797 // Frame is now completed as far as size and linkage.
1798 frame_complete =__ pc() - start;
1799
1800 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1801
1802 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1803 __ cmpwi(CR0, reg_is_cont, 0);
1804 __ bne(CR0, L_thaw);
1805
1806 // --- call Continuation.enter(Continuation c, boolean isContinue)
1807
1808 // Emit compiled static call
1809 // The call needs to be resolved. There's a special case for this in
1810 // SharedRuntime::find_callee_info_helper() which calls
1811 // LinkResolver::resolve_continuation_enter() which resolves the call to
1812 // Continuation.enter(Continuation c, boolean isContinue).
1813 address call_pc = __ pc();
1814 int start_offset = __ offset();
1815 // Put the entry point as a constant into the constant pool.
1816 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
1817 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
1818 guarantee(entry_point_toc_addr != nullptr, "const section overflow");
1819
1820 // Emit the trampoline stub which will be related to the branch-and-link below.
1821 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
1822 guarantee(stub != nullptr, "no space for trampoline stub");
1823
1824 __ relocate(relocInfo::static_call_type);
1825 // Note: At this point we do not have the address of the trampoline
1826 // stub, and the entry point might be too far away for bl, so __ pc()
1827 // serves as dummy and the bl will be patched later.
1828 __ bl(__ pc());
1829 oop_maps->add_gc_map(__ pc() - start, map);
1830 __ post_call_nop();
1831
1832 __ b(L_exit);
1833
1834 // --- Thawing path
1835
1836 __ bind(L_thaw);
1837 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1838 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw()));
1839 __ mtctr(R0);
1840 __ bctrl();
1841 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1842 ContinuationEntry::_return_pc_offset = __ pc() - start;
1843 __ post_call_nop();
1844
1845 // --- Normal exit (resolve/thawing)
1846
1847 __ bind(L_exit);
1848 ContinuationEntry::_cleanup_offset = __ pc() - start;
1849 continuation_enter_cleanup(masm);
1850
1851 // Pop frame and return
1852 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP));
1853 __ addi(R1_SP, R1_SP, framesize_words*wordSize);
1854 DEBUG_ONLY(__ cmpd(CR0, R0, R1_SP));
1855 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size");
1856 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1857 __ mtlr(R0);
1858 __ blr();
1859
1860 // --- Exception handling path
1861
1862 exception_offset = __ pc() - start;
1863
1864 continuation_enter_cleanup(masm);
1865 Register ex_pc = R17_tos; // nonvolatile register
1866 Register ex_oop = R15_esp; // nonvolatile register
1867 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc
1868 __ ld(ex_pc, _abi0(lr), ex_pc);
1869 __ mr(ex_oop, R3_RET); // save return value containing the exception oop
1870 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc);
1871 __ mtlr(R3_RET); // the exception handler
1872 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame
1873
1874 // Continue at exception handler
1875 // See OptoRuntime::generate_exception_blob for register arguments
1876 __ mr(R3_ARG1, ex_oop); // pass exception oop
1877 __ mr(R4_ARG2, ex_pc); // pass exception pc
1878 __ blr();
1879
1880 // static stub for the call above
1881 stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc);
1882 guarantee(stub != nullptr, "no space for static stub");
1883 }
1884
1885 static void gen_continuation_yield(MacroAssembler* masm,
1886 const VMRegPair* regs,
1887 OopMapSet* oop_maps,
1888 int& frame_complete,
1889 int& framesize_words,
1890 int& compiled_entry_offset) {
1891 Register tmp = R10_ARG8;
1892
1893 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes);
1894 framesize_words = framesize_bytes / wordSize;
1895
1896 address start = __ pc();
1897 compiled_entry_offset = __ pc() - start;
1898
1899 // Save return pc and push entry frame
1900 __ mflr(tmp);
1901 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc
1902 __ push_frame(framesize_bytes , R0); // SP -= frame_size_in_bytes
1903
1904 DEBUG_ONLY(__ block_comment("Frame Complete"));
1905 frame_complete = __ pc() - start;
1906 address last_java_pc = __ pc();
1907
1908 // This nop must be exactly at the PC we push into the frame info.
1909 // We use this nop for fast CodeBlob lookup, associate the OopMap
1910 // with it right away.
1911 __ post_call_nop();
1912 OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1);
1913 oop_maps->add_gc_map(last_java_pc - start, map);
1914
1915 __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated
1916 __ set_last_Java_frame(R1_SP, tmp);
1917 __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP);
1918 __ reset_last_Java_frame();
1919
1920 Label L_pinned;
1921
1922 __ cmpwi(CR0, R3_RET, 0);
1923 __ bne(CR0, L_pinned);
1924
1925 // yield succeeded
1926
1927 // Pop frames of continuation including this stub's frame
1928 __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1929 // The frame pushed by gen_continuation_enter is on top now again
1930 continuation_enter_cleanup(masm);
1931
1932 // Pop frame and return
1933 Label L_return;
1934 __ bind(L_return);
1935 __ pop_frame();
1936 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1937 __ mtlr(R0);
1938 __ blr();
1939
1940 // yield failed - continuation is pinned
1941
1942 __ bind(L_pinned);
1943
1944 // handle pending exception thrown by freeze
1945 __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
1946 __ cmpdi(CR0, tmp, 0);
1947 __ beq(CR0, L_return); // return if no exception is pending
1948 __ pop_frame();
1949 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1950 __ mtlr(R0);
1951 __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0);
1952 __ mtctr(tmp);
1953 __ bctr();
1954 }
1955
1956 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1957 ::continuation_enter_cleanup(masm);
1958 }
1959
1960 // ---------------------------------------------------------------------------
1961 // Generate a native wrapper for a given method. The method takes arguments
1962 // in the Java compiled code convention, marshals them to the native
1963 // convention (handlizes oops, etc), transitions to native, makes the call,
1964 // returns to java state (possibly blocking), unhandlizes any result and
1965 // returns.
1966 //
1967 // Critical native functions are a shorthand for the use of
1968 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1969 // functions. The wrapper is expected to unpack the arguments before
1970 // passing them to the callee. Critical native functions leave the state _in_Java,
1971 // since they cannot stop for GC.
1972 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1973 // block and the check for pending exceptions it's impossible for them
1974 // to be thrown.
1975 //
1976 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1977 const methodHandle& method,
1978 int compile_id,
1979 BasicType *in_sig_bt,
1980 VMRegPair *in_regs,
1981 BasicType ret_type) {
1982 if (method->is_continuation_native_intrinsic()) {
1983 int exception_offset = -1;
1984 OopMapSet* oop_maps = new OopMapSet();
1985 int frame_complete = -1;
1986 int stack_slots = -1;
1987 int interpreted_entry_offset = -1;
1988 int vep_offset = -1;
1989 if (method->is_continuation_enter_intrinsic()) {
1990 gen_continuation_enter(masm,
1991 in_regs,
1992 exception_offset,
1993 oop_maps,
1994 frame_complete,
1995 stack_slots,
1996 interpreted_entry_offset,
1997 vep_offset);
1998 } else if (method->is_continuation_yield_intrinsic()) {
1999 gen_continuation_yield(masm,
2000 in_regs,
2001 oop_maps,
2002 frame_complete,
2003 stack_slots,
2004 vep_offset);
2005 } else {
2006 guarantee(false, "Unknown Continuation native intrinsic");
2007 }
2008
2009 #ifdef ASSERT
2010 if (method->is_continuation_enter_intrinsic()) {
2011 assert(interpreted_entry_offset != -1, "Must be set");
2012 assert(exception_offset != -1, "Must be set");
2013 } else {
2014 assert(interpreted_entry_offset == -1, "Must be unset");
2015 assert(exception_offset == -1, "Must be unset");
2016 }
2017 assert(frame_complete != -1, "Must be set");
2018 assert(stack_slots != -1, "Must be set");
2019 assert(vep_offset != -1, "Must be set");
2020 #endif
2021
2022 __ flush();
2023 nmethod* nm = nmethod::new_native_nmethod(method,
2024 compile_id,
2025 masm->code(),
2026 vep_offset,
2027 frame_complete,
2028 stack_slots,
2029 in_ByteSize(-1),
2030 in_ByteSize(-1),
2031 oop_maps,
2032 exception_offset);
2033 if (nm == nullptr) return nm;
2034 if (method->is_continuation_enter_intrinsic()) {
2035 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
2036 } else if (method->is_continuation_yield_intrinsic()) {
2037 _cont_doYield_stub = nm;
2038 }
2039 return nm;
2040 }
2041
2042 if (method->is_method_handle_intrinsic()) {
2043 vmIntrinsics::ID iid = method->intrinsic_id();
2044 intptr_t start = (intptr_t)__ pc();
2045 int vep_offset = ((intptr_t)__ pc()) - start;
2046 gen_special_dispatch(masm,
2047 method,
2048 in_sig_bt,
2049 in_regs);
2050 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
2051 __ flush();
2052 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
2053 return nmethod::new_native_nmethod(method,
2054 compile_id,
2055 masm->code(),
2056 vep_offset,
2057 frame_complete,
2058 stack_slots / VMRegImpl::slots_per_word,
2059 in_ByteSize(-1),
2060 in_ByteSize(-1),
2061 (OopMapSet*)nullptr);
2062 }
2063
2064 address native_func = method->native_function();
2065 assert(native_func != nullptr, "must have function");
2066
2067 // First, create signature for outgoing C call
2068 // --------------------------------------------------------------------------
2069
2070 int total_in_args = method->size_of_parameters();
2071 // We have received a description of where all the java args are located
2072 // on entry to the wrapper. We need to convert these args to where
2073 // the jni function will expect them. To figure out where they go
2074 // we convert the java signature to a C signature by inserting
2075 // the hidden arguments as arg[0] and possibly arg[1] (static method)
2076
2077 // Calculate the total number of C arguments and create arrays for the
2078 // signature and the outgoing registers.
2079 // On ppc64, we have two arrays for the outgoing registers, because
2080 // some floating-point arguments must be passed in registers _and_
2081 // in stack locations.
2082 bool method_is_static = method->is_static();
2083 int total_c_args = total_in_args + (method_is_static ? 2 : 1);
2084
2085 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2086 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2087
2088 // Create the signature for the C call:
2089 // 1) add the JNIEnv*
2090 // 2) add the class if the method is static
2091 // 3) copy the rest of the incoming signature (shifted by the number of
2092 // hidden arguments).
2093
2094 int argc = 0;
2095 out_sig_bt[argc++] = T_ADDRESS;
2096 if (method->is_static()) {
2097 out_sig_bt[argc++] = T_OBJECT;
2098 }
2099
2100 for (int i = 0; i < total_in_args ; i++ ) {
2101 out_sig_bt[argc++] = in_sig_bt[i];
2102 }
2103
2104
2105 // Compute the wrapper's frame size.
2106 // --------------------------------------------------------------------------
2107
2108 // Now figure out where the args must be stored and how much stack space
2109 // they require.
2110 //
2111 // Compute framesize for the wrapper. We need to handlize all oops in
2112 // incoming registers.
2113 //
2114 // Calculate the total number of stack slots we will need:
2115 // 1) abi requirements
2116 // 2) outgoing arguments
2117 // 3) space for inbound oop handle area
2118 // 4) space for handlizing a klass if static method
2119 // 5) space for a lock if synchronized method
2120 // 6) workspace for saving return values, int <-> float reg moves, etc.
2121 // 7) alignment
2122 //
2123 // Layout of the native wrapper frame:
2124 // (stack grows upwards, memory grows downwards)
2125 //
2126 // NW [ABI_REG_ARGS] <-- 1) R1_SP
2127 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
2128 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset
2129 // klass <-- 4) R1_SP + klass_offset
2130 // lock <-- 5) R1_SP + lock_offset
2131 // [workspace] <-- 6) R1_SP + workspace_offset
2132 // [alignment] (optional) <-- 7)
2133 // caller [JIT_TOP_ABI_48] <-- r_callers_sp
2134 //
2135 // - *_slot_offset Indicates offset from SP in number of stack slots.
2136 // - *_offset Indicates offset from SP in bytes.
2137
2138 int stack_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args) + // 1+2)
2139 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.
2140
2141 // Now the space for the inbound oop handle area.
2142 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
2143
2144 int oop_handle_slot_offset = stack_slots;
2145 stack_slots += total_save_slots; // 3)
2146
2147 int klass_slot_offset = 0;
2148 int klass_offset = -1;
2149 if (method_is_static) { // 4)
2150 klass_slot_offset = stack_slots;
2151 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2152 stack_slots += VMRegImpl::slots_per_word;
2153 }
2154
2155 int lock_slot_offset = 0;
2156 int lock_offset = -1;
2157 if (method->is_synchronized()) { // 5)
2158 lock_slot_offset = stack_slots;
2159 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size;
2160 stack_slots += VMRegImpl::slots_per_word;
2161 }
2162
2163 int workspace_slot_offset = stack_slots; // 6)
2164 stack_slots += 2;
2165
2166 // Now compute actual number of stack words we need.
2167 // Rounding to make stack properly aligned.
2168 stack_slots = align_up(stack_slots, // 7)
2169 frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
2170 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
2171
2172
2173 // Now we can start generating code.
2174 // --------------------------------------------------------------------------
2175
2176 intptr_t start_pc = (intptr_t)__ pc();
2177 intptr_t vep_start_pc;
2178 intptr_t frame_done_pc;
2179
2180 Label handle_pending_exception;
2181 Label last_java_pc;
2182
2183 Register r_callers_sp = R21;
2184 Register r_temp_1 = R22;
2185 Register r_temp_2 = R23;
2186 Register r_temp_3 = R24;
2187 Register r_temp_4 = R25;
2188 Register r_temp_5 = R26;
2189 Register r_temp_6 = R27;
2190 Register r_last_java_pc = R28;
2191
2192 Register r_carg1_jnienv = noreg;
2193 Register r_carg2_classorobject = noreg;
2194 r_carg1_jnienv = out_regs[0].first()->as_Register();
2195 r_carg2_classorobject = out_regs[1].first()->as_Register();
2196
2197
2198 // Generate the Unverified Entry Point (UEP).
2199 // --------------------------------------------------------------------------
2200 assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2201
2202 // Check ic: object class == cached class?
2203 if (!method_is_static) {
2204 __ ic_check(4 /* end_alignment */);
2205 }
2206
2207 // Generate the Verified Entry Point (VEP).
2208 // --------------------------------------------------------------------------
2209 vep_start_pc = (intptr_t)__ pc();
2210
2211 if (method->needs_clinit_barrier()) {
2212 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
2213 Label L_skip_barrier;
2214 Register klass = r_temp_1;
2215 // Notify OOP recorder (don't need the relocation)
2216 AddressLiteral md = __ constant_metadata_address(method->method_holder());
2217 __ load_const_optimized(klass, md.value(), R0);
2218 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
2219
2220 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
2221 __ mtctr(klass);
2222 __ bctr();
2223
2224 __ bind(L_skip_barrier);
2225 }
2226
2227 __ save_LR(r_temp_1);
2228 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2229 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
2230 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
2231
2232 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
2233 bs->nmethod_entry_barrier(masm, r_temp_1);
2234
2235 frame_done_pc = (intptr_t)__ pc();
2236
2237 // Native nmethod wrappers never take possession of the oop arguments.
2238 // So the caller will gc the arguments.
2239 // The only thing we need an oopMap for is if the call is static.
2240 //
2241 // An OopMap for lock (and class if static), and one for the VM call itself.
2242 OopMapSet *oop_maps = new OopMapSet();
2243 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2244
2245 // Move arguments from register/stack to register/stack.
2246 // --------------------------------------------------------------------------
2247 //
2248 // We immediately shuffle the arguments so that for any vm call we have
2249 // to make from here on out (sync slow path, jvmti, etc.) we will have
2250 // captured the oops from our caller and have a valid oopMap for them.
2251 //
2252 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2253 // (derived from JavaThread* which is in R16_thread) and, if static,
2254 // the class mirror instead of a receiver. This pretty much guarantees that
2255 // register layout will not match. We ignore these extra arguments during
2256 // the shuffle. The shuffle is described by the two calling convention
2257 // vectors we have in our possession. We simply walk the java vector to
2258 // get the source locations and the c vector to get the destinations.
2259
2260 // Record sp-based slot for receiver on stack for non-static methods.
2261 int receiver_offset = -1;
2262
2263 // We move the arguments backward because the floating point registers
2264 // destination will always be to a register with a greater or equal
2265 // register number or the stack.
2266 // in is the index of the incoming Java arguments
2267 // out is the index of the outgoing C arguments
2268
2269 #ifdef ASSERT
2270 bool reg_destroyed[Register::number_of_registers];
2271 bool freg_destroyed[FloatRegister::number_of_registers];
2272 for (int r = 0 ; r < Register::number_of_registers ; r++) {
2273 reg_destroyed[r] = false;
2274 }
2275 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) {
2276 freg_destroyed[f] = false;
2277 }
2278 #endif // ASSERT
2279
2280 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
2281
2282 #ifdef ASSERT
2283 if (in_regs[in].first()->is_Register()) {
2284 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
2285 } else if (in_regs[in].first()->is_FloatRegister()) {
2286 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
2287 }
2288 if (out_regs[out].first()->is_Register()) {
2289 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
2290 } else if (out_regs[out].first()->is_FloatRegister()) {
2291 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
2292 }
2293 #endif // ASSERT
2294
2295 switch (in_sig_bt[in]) {
2296 case T_BOOLEAN:
2297 case T_CHAR:
2298 case T_BYTE:
2299 case T_SHORT:
2300 case T_INT:
2301 // Move int and do sign extension.
2302 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2303 break;
2304 case T_LONG:
2305 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2306 break;
2307 case T_ARRAY:
2308 case T_OBJECT:
2309 object_move(masm, stack_slots,
2310 oop_map, oop_handle_slot_offset,
2311 ((in == 0) && (!method_is_static)), &receiver_offset,
2312 in_regs[in], out_regs[out],
2313 r_callers_sp, r_temp_1, r_temp_2);
2314 break;
2315 case T_VOID:
2316 break;
2317 case T_FLOAT:
2318 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2319 break;
2320 case T_DOUBLE:
2321 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2322 break;
2323 case T_ADDRESS:
2324 fatal("found type (T_ADDRESS) in java args");
2325 break;
2326 default:
2327 ShouldNotReachHere();
2328 break;
2329 }
2330 }
2331
2332 // Pre-load a static method's oop into ARG2.
2333 // Used both by locking code and the normal JNI call code.
2334 if (method_is_static) {
2335 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
2336 r_carg2_classorobject);
2337
2338 // Now handlize the static class mirror in carg2. It's known not-null.
2339 __ std(r_carg2_classorobject, klass_offset, R1_SP);
2340 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2341 __ addi(r_carg2_classorobject, R1_SP, klass_offset);
2342 }
2343
2344 // Get JNIEnv* which is first argument to native.
2345 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
2346
2347 // NOTE:
2348 //
2349 // We have all of the arguments setup at this point.
2350 // We MUST NOT touch any outgoing regs from this point on.
2351 // So if we must call out we must push a new frame.
2352
2353 // The last java pc will also be used as resume pc if this is the wrapper for wait0.
2354 // For this purpose the precise location matters but not for oopmap lookup.
2355 __ calculate_address_from_global_toc(r_last_java_pc, last_java_pc, true, true, true, true);
2356
2357 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
2358 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
2359
2360 # if 0
2361 // DTrace method entry
2362 # endif
2363
2364 // Lock a synchronized method.
2365 // --------------------------------------------------------------------------
2366
2367 if (method->is_synchronized()) {
2368 Register r_oop = r_temp_4;
2369 const Register r_box = r_temp_5;
2370 Label done, locked;
2371
2372 // Load the oop for the object or class. r_carg2_classorobject contains
2373 // either the handlized oop from the incoming arguments or the handlized
2374 // class mirror (if the method is static).
2375 __ ld(r_oop, 0, r_carg2_classorobject);
2376
2377 // Get the lock box slot's address.
2378 __ addi(r_box, R1_SP, lock_offset);
2379
2380 // Try fastpath for locking.
2381 // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
2382 Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
2383 __ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
2384 __ beq(CR0, locked);
2385
2386 // None of the above fast optimizations worked so we have to get into the
2387 // slow case of monitor enter. Inline a special case of call_VM that
2388 // disallows any pending_exception.
2389
2390 // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
2391 int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes);
2392 __ mr(R11_scratch1, R1_SP);
2393 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs);
2394
2395 // Do the call.
2396 __ set_last_Java_frame(R11_scratch1, r_last_java_pc);
2397 assert(r_last_java_pc->is_nonvolatile(), "r_last_java_pc needs to be preserved accross complete_monitor_locking_C call");
2398 // The following call will not be preempted.
2399 // push_cont_fastpath forces freeze slow path in case we try to preempt where we will pin the
2400 // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
2401 __ push_cont_fastpath();
2402 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
2403 __ pop_cont_fastpath();
2404 __ reset_last_Java_frame();
2405
2406 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs);
2407
2408 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2409 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C");
2410
2411 __ bind(locked);
2412 }
2413
2414 __ set_last_Java_frame(R1_SP, r_last_java_pc);
2415
2416 // Publish thread state
2417 // --------------------------------------------------------------------------
2418
2419 // Transition from _thread_in_Java to _thread_in_native.
2420 __ li(R0, _thread_in_native);
2421 __ release();
2422 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2423 __ stw(R0, thread_(thread_state));
2424
2425
2426 // The JNI call
2427 // --------------------------------------------------------------------------
2428 __ call_c(native_func, relocInfo::runtime_call_type);
2429
2430
2431 // Now, we are back from the native code.
2432
2433
2434 // Unpack the native result.
2435 // --------------------------------------------------------------------------
2436
2437 // For int-types, we do any needed sign-extension required.
2438 // Care must be taken that the return values (R3_RET and F1_RET)
2439 // will survive any VM calls for blocking or unlocking.
2440 // An OOP result (handle) is done specially in the slow-path code.
2441
2442 switch (ret_type) {
2443 case T_VOID: break; // Nothing to do!
2444 case T_FLOAT: break; // Got it where we want it (unless slow-path).
2445 case T_DOUBLE: break; // Got it where we want it (unless slow-path).
2446 case T_LONG: break; // Got it where we want it (unless slow-path).
2447 case T_OBJECT: break; // Really a handle.
2448 // Cannot de-handlize until after reclaiming jvm_lock.
2449 case T_ARRAY: break;
2450
2451 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1)
2452 __ normalize_bool(R3_RET);
2453 break;
2454 }
2455 case T_BYTE: { // sign extension
2456 __ extsb(R3_RET, R3_RET);
2457 break;
2458 }
2459 case T_CHAR: { // unsigned result
2460 __ andi(R3_RET, R3_RET, 0xffff);
2461 break;
2462 }
2463 case T_SHORT: { // sign extension
2464 __ extsh(R3_RET, R3_RET);
2465 break;
2466 }
2467 case T_INT: // nothing to do
2468 break;
2469 default:
2470 ShouldNotReachHere();
2471 break;
2472 }
2473
2474 // Publish thread state
2475 // --------------------------------------------------------------------------
2476
2477 // Switch thread to "native transition" state before reading the
2478 // synchronization state. This additional state is necessary because reading
2479 // and testing the synchronization state is not atomic w.r.t. GC, as this
2480 // scenario demonstrates:
2481 // - Java thread A, in _thread_in_native state, loads _not_synchronized
2482 // and is preempted.
2483 // - VM thread changes sync state to synchronizing and suspends threads
2484 // for GC.
2485 // - Thread A is resumed to finish this native method, but doesn't block
2486 // here since it didn't see any synchronization in progress, and escapes.
2487
2488 // Transition from _thread_in_native to _thread_in_native_trans.
2489 __ li(R0, _thread_in_native_trans);
2490 __ release();
2491 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2492 __ stw(R0, thread_(thread_state));
2493
2494
2495 // Must we block?
2496 // --------------------------------------------------------------------------
2497
2498 // Block, if necessary, before resuming in _thread_in_Java state.
2499 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2500 {
2501 Label no_block, sync;
2502
2503 // Force this write out before the read below.
2504 if (!UseSystemMemoryBarrier) {
2505 __ fence();
2506 }
2507
2508 Register sync_state_addr = r_temp_4;
2509 Register sync_state = r_temp_5;
2510 Register suspend_flags = r_temp_6;
2511
2512 // No synchronization in progress nor yet synchronized
2513 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
2514 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */);
2515
2516 // Not suspended.
2517 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
2518 __ lwz(suspend_flags, thread_(suspend_flags));
2519 __ cmpwi(CR1, suspend_flags, 0);
2520 __ beq(CR1, no_block);
2521
2522 // Block. Save any potential method result value before the operation and
2523 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2524 // lets us share the oopMap we used when we went native rather than create
2525 // a distinct one for this pc.
2526 __ bind(sync);
2527 __ isync();
2528
2529 address entry_point =
2530 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
2531 save_native_result(masm, ret_type, workspace_slot_offset);
2532 __ call_VM_leaf(entry_point, R16_thread);
2533 restore_native_result(masm, ret_type, workspace_slot_offset);
2534
2535 __ bind(no_block);
2536
2537 // Publish thread state.
2538 // --------------------------------------------------------------------------
2539
2540 // Thread state is thread_in_native_trans. Any safepoint blocking has
2541 // already happened so we can now change state to _thread_in_Java.
2542
2543 // Transition from _thread_in_native_trans to _thread_in_Java.
2544 __ li(R0, _thread_in_Java);
2545 __ lwsync(); // Acquire safepoint and suspend state, release thread state.
2546 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2547 __ stw(R0, thread_(thread_state));
2548
2549 // Check preemption for Object.wait()
2550 if (method->is_object_wait0()) {
2551 Label not_preempted;
2552 __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
2553 __ cmpdi(CR0, R0, 0);
2554 __ beq(CR0, not_preempted);
2555 __ mtlr(R0);
2556 __ li(R0, 0);
2557 __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
2558 __ blr();
2559 __ bind(not_preempted);
2560 }
2561 __ bind(last_java_pc);
2562 // We use the same pc/oopMap repeatedly when we call out above.
2563 intptr_t oopmap_pc = (intptr_t) __ pc();
2564 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
2565 }
2566
2567 // Reguard any pages if necessary.
2568 // --------------------------------------------------------------------------
2569
2570 Label no_reguard;
2571 __ lwz(r_temp_1, thread_(stack_guard_state));
2572 __ cmpwi(CR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
2573 __ bne(CR0, no_reguard);
2574
2575 save_native_result(masm, ret_type, workspace_slot_offset);
2576 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2577 restore_native_result(masm, ret_type, workspace_slot_offset);
2578
2579 __ bind(no_reguard);
2580
2581
2582 // Unlock
2583 // --------------------------------------------------------------------------
2584
2585 if (method->is_synchronized()) {
2586 const Register r_oop = r_temp_4;
2587 const Register r_box = r_temp_5;
2588 const Register r_exception = r_temp_6;
2589 Label done;
2590
2591 // Get oop and address of lock object box.
2592 if (method_is_static) {
2593 assert(klass_offset != -1, "");
2594 __ ld(r_oop, klass_offset, R1_SP);
2595 } else {
2596 assert(receiver_offset != -1, "");
2597 __ ld(r_oop, receiver_offset, R1_SP);
2598 }
2599 __ addi(r_box, R1_SP, lock_offset);
2600
2601 // Try fastpath for unlocking.
2602 __ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2603 __ beq(CR0, done);
2604
2605 // Save and restore any potential method result value around the unlocking operation.
2606 save_native_result(masm, ret_type, workspace_slot_offset);
2607
2608 // Must save pending exception around the slow-path VM call. Since it's a
2609 // leaf call, the pending exception (if any) can be kept in a register.
2610 __ ld(r_exception, thread_(pending_exception));
2611 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2612 __ li(R0, 0);
2613 __ std(R0, thread_(pending_exception));
2614
2615 // Slow case of monitor enter.
2616 // Inline a special case of call_VM that disallows any pending_exception.
2617 // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
2618 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
2619
2620 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2621 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");
2622
2623 restore_native_result(masm, ret_type, workspace_slot_offset);
2624
2625 // Check_forward_pending_exception jump to forward_exception if any pending
2626 // exception is set. The forward_exception routine expects to see the
2627 // exception in pending_exception and not in a register. Kind of clumsy,
2628 // since all folks who branch to forward_exception must have tested
2629 // pending_exception first and hence have it in a register already.
2630 __ std(r_exception, thread_(pending_exception));
2631
2632 __ bind(done);
2633 }
2634
2635 # if 0
2636 // DTrace method exit
2637 # endif
2638
2639 // Clear "last Java frame" SP and PC.
2640 // --------------------------------------------------------------------------
2641
2642 // Last java frame won't be set if we're resuming after preemption
2643 bool maybe_preempted = method->is_object_wait0();
2644 __ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */);
2645
2646 // Unbox oop result, e.g. JNIHandles::resolve value.
2647 // --------------------------------------------------------------------------
2648
2649 if (is_reference_type(ret_type)) {
2650 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE);
2651 }
2652
2653 if (CheckJNICalls) {
2654 // clear_pending_jni_exception_check
2655 __ load_const_optimized(R0, 0L);
2656 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
2657 }
2658
2659 // Reset handle block.
2660 // --------------------------------------------------------------------------
2661 __ ld(r_temp_1, thread_(active_handles));
2662 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
2663 __ li(r_temp_2, 0);
2664 __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);
2665
2666 // Prepare for return
2667 // --------------------------------------------------------------------------
2668 __ pop_frame();
2669 __ restore_LR(R11);
2670
2671 #if INCLUDE_JFR
2672 // We need to do a poll test after unwind in case the sampler
2673 // managed to sample the native frame after returning to Java.
2674 Label L_stub;
2675 int safepoint_offset = __ offset();
2676 if (!UseSIGTRAP) {
2677 __ relocate(relocInfo::poll_return_type);
2678 }
2679 __ safepoint_poll(L_stub, r_temp_2, true /* at_return */, true /* in_nmethod: frame already popped */);
2680 #endif // INCLUDE_JFR
2681
2682 // Check for pending exceptions.
2683 // --------------------------------------------------------------------------
2684 __ ld(r_temp_2, thread_(pending_exception));
2685 __ cmpdi(CR0, r_temp_2, 0);
2686 __ bne(CR0, handle_pending_exception);
2687
2688 // Return.
2689 __ blr();
2690
2691 // Handler for return safepoint (out-of-line).
2692 #if INCLUDE_JFR
2693 if (!UseSIGTRAP) {
2694 __ bind(L_stub);
2695 __ jump_to_polling_page_return_handler_blob(safepoint_offset);
2696 }
2697 #endif // INCLUDE_JFR
2698
2699 // Handler for pending exceptions (out-of-line).
2700 // --------------------------------------------------------------------------
2701 // Since this is a native call, we know the proper exception handler
2702 // is the empty function. We just pop this frame and then jump to
2703 // forward_exception_entry.
2704 __ bind(handle_pending_exception);
2705 __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2706 relocInfo::runtime_call_type);
2707
2708 // Done.
2709 // --------------------------------------------------------------------------
2710
2711 __ flush();
2712
2713 nmethod *nm = nmethod::new_native_nmethod(method,
2714 compile_id,
2715 masm->code(),
2716 vep_start_pc-start_pc,
2717 frame_done_pc-start_pc,
2718 stack_slots / VMRegImpl::slots_per_word,
2719 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2720 in_ByteSize(lock_offset),
2721 oop_maps);
2722
2723 return nm;
2724 }
2725
2726 // This function returns the adjust size (in number of words) to a c2i adapter
2727 // activation for use during deoptimization.
2728 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2729 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words);
2730 }
2731
2732 uint SharedRuntime::in_preserve_stack_slots() {
2733 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size;
2734 }
2735
2736 uint SharedRuntime::out_preserve_stack_slots() {
2737 #if defined(COMPILER1) || defined(COMPILER2)
2738 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2739 #else
2740 return 0;
2741 #endif
2742 }
2743
2744 VMReg SharedRuntime::thread_register() {
2745 // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames).
2746 ShouldNotCallThis();
2747 return nullptr;
2748 }
2749
2750 #if defined(COMPILER1) || defined(COMPILER2)
2751 // Frame generation for deopt and uncommon trap blobs.
2752 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2753 /* Read */
2754 Register unroll_block_reg,
2755 /* Update */
2756 Register frame_sizes_reg,
2757 Register number_of_frames_reg,
2758 Register pcs_reg,
2759 /* Invalidate */
2760 Register frame_size_reg,
2761 Register pc_reg) {
2762
2763 __ ld(pc_reg, 0, pcs_reg);
2764 __ ld(frame_size_reg, 0, frame_sizes_reg);
2765 __ std(pc_reg, _abi0(lr), R1_SP);
2766 __ push_frame(frame_size_reg, R0/*tmp*/);
2767 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
2768 __ addi(number_of_frames_reg, number_of_frames_reg, -1);
2769 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
2770 __ addi(pcs_reg, pcs_reg, wordSize);
2771 }
2772
2773 // Loop through the UnrollBlock info and create new frames.
2774 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
2775 /* read */
2776 Register unroll_block_reg,
2777 /* invalidate */
2778 Register frame_sizes_reg,
2779 Register number_of_frames_reg,
2780 Register pcs_reg,
2781 Register frame_size_reg,
2782 Register pc_reg) {
2783 Label loop;
2784
2785 // _number_of_frames is of type int (deoptimization.hpp)
2786 __ lwa(number_of_frames_reg,
2787 in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()),
2788 unroll_block_reg);
2789 __ ld(pcs_reg,
2790 in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()),
2791 unroll_block_reg);
2792 __ ld(frame_sizes_reg,
2793 in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()),
2794 unroll_block_reg);
2795
2796 // stack: (caller_of_deoptee, ...).
2797
2798 // At this point we either have an interpreter frame or a compiled
2799 // frame on top of stack. If it is a compiled frame we push a new c2i
2800 // adapter here
2801
2802 // Memorize top-frame stack-pointer.
2803 __ mr(frame_size_reg/*old_sp*/, R1_SP);
2804
2805 // Resize interpreter top frame OR C2I adapter.
2806
2807 // At this moment, the top frame (which is the caller of the deoptee) is
2808 // an interpreter frame or a newly pushed C2I adapter or an entry frame.
2809 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
2810 // outgoing arguments.
2811 //
2812 // In order to push the interpreter frame for the deoptee, we need to
2813 // resize the top frame such that we are able to place the deoptee's
2814 // locals in the frame.
2815 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
2816 // into a valid PARENT_IJAVA_FRAME_ABI.
2817
2818 __ lwa(R11_scratch1,
2819 in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()),
2820 unroll_block_reg);
2821 __ neg(R11_scratch1, R11_scratch1);
2822
2823 // R11_scratch1 contains size of locals for frame resizing.
2824 // R12_scratch2 contains top frame's lr.
2825
2826 // Resize frame by complete frame size prevents TOC from being
2827 // overwritten by locals. A more stack space saving way would be
2828 // to copy the TOC to its location in the new abi.
2829 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
2830
2831 // now, resize the frame
2832 __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
2833
2834 // In the case where we have resized a c2i frame above, the optional
2835 // alignment below the locals has size 32 (why?).
2836 __ std(R12_scratch2, _abi0(lr), R1_SP);
2837
2838 // Initialize initial_caller_sp.
2839 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
2840
2841 #ifdef ASSERT
2842 // Make sure that there is at least one entry in the array.
2843 __ cmpdi(CR0, number_of_frames_reg, 0);
2844 __ asm_assert_ne("array_size must be > 0");
2845 #endif
2846
2847 // Now push the new interpreter frames.
2848 //
2849 __ bind(loop);
2850 // Allocate a new frame, fill in the pc.
2851 push_skeleton_frame(masm, deopt,
2852 unroll_block_reg,
2853 frame_sizes_reg,
2854 number_of_frames_reg,
2855 pcs_reg,
2856 frame_size_reg,
2857 pc_reg);
2858 __ cmpdi(CR0, number_of_frames_reg, 0);
2859 __ bne(CR0, loop);
2860
2861 // Get the return address pointing into the template interpreter.
2862 __ ld(R0, 0, pcs_reg);
2863 // Store it in the top interpreter frame.
2864 __ std(R0, _abi0(lr), R1_SP);
2865 // Initialize frame_manager_lr of interpreter top frame.
2866 }
2867 #endif
2868
2869 void SharedRuntime::generate_deopt_blob() {
2870 // Allocate space for the code
2871 ResourceMark rm;
2872 // Setup code generation tools
2873 const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2874 CodeBuffer buffer(name, 2048, 1024);
2875 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
2876 Label exec_mode_initialized;
2877 OopMap* map = nullptr;
2878 OopMapSet *oop_maps = new OopMapSet();
2879
2880 // size of ABI112 plus spill slots for R3_RET and F1_RET.
2881 const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size;
2882 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
2883 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
2884
2885 const Register exec_mode_reg = R21_tmp1;
2886
2887 const address start = __ pc();
2888 int exception_offset = 0;
2889 int exception_in_tls_offset = 0;
2890 int reexecute_offset = 0;
2891
2892 #if defined(COMPILER1) || defined(COMPILER2)
2893 // --------------------------------------------------------------------------
2894 // Prolog for non exception case!
2895
2896 // We have been called from the deopt handler of the deoptee.
2897 //
2898 // deoptee:
2899 // ...
2900 // call X
2901 // ...
2902 // deopt_handler: call_deopt_stub
2903 // cur. return pc --> ...
2904 //
2905 // The return_pc has been stored in the frame of the deoptee and
2906 // will replace the address of the deopt_handler in the call
2907 // to Deoptimization::fetch_unroll_info below.
2908
2909 // Push the "unpack frame"
2910 // Save everything in sight.
2911 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2912 &first_frame_size_in_bytes,
2913 /*generate_oop_map=*/ true,
2914 RegisterSaver::return_pc_is_lr,
2915 /*save_vectors*/ SuperwordUseVSX);
2916 assert(map != nullptr, "OopMap must have been created");
2917
2918 __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
2919 // Save exec mode for unpack_frames.
2920 __ b(exec_mode_initialized);
2921
2922 // --------------------------------------------------------------------------
2923 // Prolog for exception case
2924
2925 // An exception is pending.
2926 // We have been called with a return (interpreter) or a jump (exception blob).
2927 //
2928 // - R3_ARG1: exception oop
2929 // - R4_ARG2: exception pc
2930
2931 exception_offset = __ pc() - start;
2932
2933 BLOCK_COMMENT("Prolog for exception case");
2934
2935 // Store exception oop and pc in thread (location known to GC).
2936 // This is needed since the call to "fetch_unroll_info()" may safepoint.
2937 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2938 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2939 __ std(R4_ARG2, _abi0(lr), R1_SP);
2940
2941 // Vanilla deoptimization with an exception pending in exception_oop.
2942 exception_in_tls_offset = __ pc() - start;
2943
2944 // Push the "unpack frame".
2945 // Save everything in sight.
2946 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2947 &first_frame_size_in_bytes,
2948 /*generate_oop_map=*/ false,
2949 RegisterSaver::return_pc_is_pre_saved,
2950 /*save_vectors*/ SuperwordUseVSX);
2951
2952 // Deopt during an exception. Save exec mode for unpack_frames.
2953 __ li(exec_mode_reg, Deoptimization::Unpack_exception);
2954
2955 // fall through
2956 #ifdef COMPILER1
2957 __ b(exec_mode_initialized);
2958
2959 // Reexecute entry, similar to c2 uncommon trap
2960 reexecute_offset = __ pc() - start;
2961
2962 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2963 &first_frame_size_in_bytes,
2964 /*generate_oop_map=*/ false,
2965 RegisterSaver::return_pc_is_pre_saved,
2966 /*save_vectors*/ SuperwordUseVSX);
2967 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
2968 #endif
2969
2970 // --------------------------------------------------------------------------
2971 __ BIND(exec_mode_initialized);
2972
2973 const Register unroll_block_reg = R22_tmp2;
2974
2975 // We need to set `last_Java_frame' because `fetch_unroll_info' will
2976 // call `last_Java_frame()'. The value of the pc in the frame is not
2977 // particularly important. It just needs to identify this blob.
2978 __ set_last_Java_frame(R1_SP, noreg);
2979
2980 // With EscapeAnalysis turned on, this call may safepoint!
2981 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg);
2982 address calls_return_pc = __ last_calls_return_pc();
2983 // Set an oopmap for the call site that describes all our saved registers.
2984 oop_maps->add_gc_map(calls_return_pc - start, map);
2985
2986 __ reset_last_Java_frame();
2987 // Save the return value.
2988 __ mr(unroll_block_reg, R3_RET);
2989
2990 // Restore only the result registers that have been saved
2991 // by save_volatile_registers(...).
2992 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes, /*save_vectors*/ SuperwordUseVSX);
2993
2994 // reload the exec mode from the UnrollBlock (it might have changed)
2995 __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
2996 // In excp_deopt_mode, restore and clear exception oop which we
2997 // stored in the thread during exception entry above. The exception
2998 // oop will be the return value of this stub.
2999 Label skip_restore_excp;
3000 __ cmpdi(CR0, exec_mode_reg, Deoptimization::Unpack_exception);
3001 __ bne(CR0, skip_restore_excp);
3002 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
3003 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
3004 __ li(R0, 0);
3005 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
3006 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
3007 __ BIND(skip_restore_excp);
3008
3009 __ pop_frame();
3010
3011 // stack: (deoptee, optional i2c, caller of deoptee, ...).
3012
3013 // pop the deoptee's frame
3014 __ pop_frame();
3015
3016 // stack: (caller_of_deoptee, ...).
3017
3018 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
3019 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
3020 // and the frame is effectively not resized.
3021 Register caller_sp = R23_tmp3;
3022 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
3023 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
3024
3025 // Loop through the `UnrollBlock' info and create interpreter frames.
3026 push_skeleton_frames(masm, true/*deopt*/,
3027 unroll_block_reg,
3028 R23_tmp3,
3029 R24_tmp4,
3030 R25_tmp5,
3031 R26_tmp6,
3032 R27_tmp7);
3033
3034 // stack: (skeletal interpreter frame, ..., optional skeletal
3035 // interpreter frame, optional c2i, caller of deoptee, ...).
3036
3037 // push an `unpack_frame' taking care of float / int return values.
3038 __ push_frame(frame_size_in_bytes, R0/*tmp*/);
3039
3040 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3041 // skeletal interpreter frame, optional c2i, caller of deoptee,
3042 // ...).
3043
3044 // Spill live volatile registers since we'll do a call.
3045 __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP);
3046 __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
3047
3048 // Let the unpacker layout information in the skeletal frames just
3049 // allocated.
3050 __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true);
3051 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
3052 // This is a call to a LEAF method, so no oop map is required.
3053 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3054 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
3055 __ reset_last_Java_frame();
3056
3057 // Restore the volatiles saved above.
3058 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP);
3059 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
3060
3061 // Pop the unpack frame.
3062 __ pop_frame();
3063 __ restore_LR(R0);
3064
3065 // stack: (top interpreter frame, ..., optional interpreter frame,
3066 // optional c2i, caller of deoptee, ...).
3067
3068 // Initialize R14_state.
3069 __ restore_interpreter_state(R11_scratch1);
3070 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3071
3072 // Return to the interpreter entry point.
3073 __ blr();
3074 #else // !defined(COMPILER1) && !defined(COMPILER2)
3075 __ unimplemented("deopt blob needed only with compiler");
3076 #endif
3077
3078 // Make sure all code is generated
3079 __ flush();
3080
3081 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
3082 reexecute_offset, first_frame_size_in_bytes / wordSize);
3083 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3084 }
3085
3086 #ifdef COMPILER2
3087 UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
3088 // Allocate space for the code.
3089 ResourceMark rm;
3090 // Setup code generation tools.
3091 const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id);
3092 CodeBuffer buffer(name, 2048, 1024);
3093 if (buffer.blob() == nullptr) {
3094 return nullptr;
3095 }
3096 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
3097 address start = __ pc();
3098
3099 Register unroll_block_reg = R21_tmp1;
3100 Register klass_index_reg = R22_tmp2;
3101 Register unc_trap_reg = R23_tmp3;
3102 Register r_return_pc = R27_tmp7;
3103
3104 OopMapSet* oop_maps = new OopMapSet();
3105 int frame_size_in_bytes = frame::native_abi_reg_args_size;
3106 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
3107
3108 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3109
3110 // Push a dummy `unpack_frame' and call
3111 // `Deoptimization::uncommon_trap' to pack the compiled frame into a
3112 // vframe array and return the `UnrollBlock' information.
3113
3114 // Save LR to compiled frame.
3115 __ save_LR(R11_scratch1);
3116
3117 // Push an "uncommon_trap" frame.
3118 __ push_frame_reg_args(0, R11_scratch1);
3119
3120 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
3121
3122 // Set the `unpack_frame' as last_Java_frame.
3123 // `Deoptimization::uncommon_trap' expects it and considers its
3124 // sender frame as the deoptee frame.
3125 // Remember the offset of the instruction whose address will be
3126 // moved to R11_scratch1.
3127 address gc_map_pc = __ pc();
3128 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true);
3129 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);
3130
3131 __ mr(klass_index_reg, R3);
3132 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap);
3133 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
3134 R16_thread, klass_index_reg, R5_ARG3);
3135
3136 // Set an oopmap for the call site.
3137 oop_maps->add_gc_map(gc_map_pc - start, map);
3138
3139 __ reset_last_Java_frame();
3140
3141 // Pop the `unpack frame'.
3142 __ pop_frame();
3143
3144 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3145
3146 // Save the return value.
3147 __ mr(unroll_block_reg, R3_RET);
3148
3149 // Pop the uncommon_trap frame.
3150 __ pop_frame();
3151
3152 // stack: (caller_of_deoptee, ...).
3153
3154 #ifdef ASSERT
3155 __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
3156 __ cmpdi(CR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
3157 __ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
3158 #endif
3159
3160 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
3161 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
3162 // and the frame is effectively not resized.
3163 Register caller_sp = R23_tmp3;
3164 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
3165 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
3166
3167 // Allocate new interpreter frame(s) and possibly a c2i adapter
3168 // frame.
3169 push_skeleton_frames(masm, false/*deopt*/,
3170 unroll_block_reg,
3171 R22_tmp2,
3172 R23_tmp3,
3173 R24_tmp4,
3174 R25_tmp5,
3175 R26_tmp6);
3176
3177 // stack: (skeletal interpreter frame, ..., optional skeletal
3178 // interpreter frame, optional c2i, caller of deoptee, ...).
3179
3180 // Push a dummy `unpack_frame' taking care of float return values.
3181 // Call `Deoptimization::unpack_frames' to layout information in the
3182 // interpreter frames just created.
3183
3184 // Push a simple "unpack frame" here.
3185 __ push_frame_reg_args(0, R11_scratch1);
3186
3187 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3188 // skeletal interpreter frame, optional c2i, caller of deoptee,
3189 // ...).
3190
3191 // Set the "unpack_frame" as last_Java_frame.
3192 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);
3193
3194 // Indicate it is the uncommon trap case.
3195 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
3196 // Let the unpacker layout information in the skeletal frames just
3197 // allocated.
3198 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3199 R16_thread, unc_trap_reg);
3200
3201 __ reset_last_Java_frame();
3202 // Pop the `unpack frame'.
3203 __ pop_frame();
3204 // Restore LR from top interpreter frame.
3205 __ restore_LR(R11_scratch1);
3206
3207 // stack: (top interpreter frame, ..., optional interpreter frame,
3208 // optional c2i, caller of deoptee, ...).
3209
3210 __ restore_interpreter_state(R11_scratch1);
3211 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3212
3213 // Return to the interpreter entry point.
3214 __ blr();
3215
3216 masm->flush();
3217
3218 return UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
3219 }
3220 #endif // COMPILER2
3221
3222 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap.
3223 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
3224 assert(StubRoutines::forward_exception_entry() != nullptr,
3225 "must be generated before");
3226 assert(is_polling_page_id(id), "expected a polling page stub id");
3227
3228 ResourceMark rm;
3229 OopMapSet *oop_maps = new OopMapSet();
3230 OopMap* map;
3231
3232 // Allocate space for the code. Setup code generation tools.
3233 const char* name = SharedRuntime::stub_name(id);
3234 CodeBuffer buffer(name, 2048, 1024);
3235 MacroAssembler* masm = new MacroAssembler(&buffer);
3236
3237 address start = __ pc();
3238 int frame_size_in_bytes = 0;
3239
3240 RegisterSaver::ReturnPCLocation return_pc_location;
3241 bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
3242 if (cause_return) {
3243 // Nothing to do here. The frame has already been popped in MachEpilogNode.
3244 // Register LR already contains the return pc.
3245 return_pc_location = RegisterSaver::return_pc_is_pre_saved;
3246 } else {
3247 // Use thread()->saved_exception_pc() as return pc.
3248 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3249 }
3250
3251 bool save_vectors = (id == StubId::shared_polling_page_vectors_safepoint_handler_id);
3252
3253 // Save registers, fpu state, and flags. Set R31 = return pc.
3254 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3255 &frame_size_in_bytes,
3256 /*generate_oop_map=*/ true,
3257 return_pc_location, save_vectors);
3258
3259 // The following is basically a call_VM. However, we need the precise
3260 // address of the call in order to generate an oopmap. Hence, we do all the
3261 // work ourselves.
3262 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3263
3264 // The return address must always be correct so that the frame constructor
3265 // never sees an invalid pc.
3266
3267 // Do the call
3268 __ call_VM_leaf(call_ptr, R16_thread);
3269 address calls_return_pc = __ last_calls_return_pc();
3270
3271 // Set an oopmap for the call site. This oopmap will map all
3272 // oop-registers and debug-info registers as callee-saved. This
3273 // will allow deoptimization at this safepoint to find all possible
3274 // debug-info recordings, as well as let GC find all oops.
3275 oop_maps->add_gc_map(calls_return_pc - start, map);
3276
3277 Label noException;
3278
3279 // Clear the last Java frame.
3280 __ reset_last_Java_frame();
3281
3282 BLOCK_COMMENT(" Check pending exception.");
3283 const Register pending_exception = R0;
3284 __ ld(pending_exception, thread_(pending_exception));
3285 __ cmpdi(CR0, pending_exception, 0);
3286 __ beq(CR0, noException);
3287
3288 // Exception pending
3289 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3290 frame_size_in_bytes,
3291 /*restore_ctr=*/true, save_vectors);
3292
3293 BLOCK_COMMENT(" Jump to forward_exception_entry.");
3294 // Jump to forward_exception_entry, with the issuing PC in LR
3295 // so it looks like the original nmethod called forward_exception_entry.
3296 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3297
3298 // No exception case.
3299 __ BIND(noException);
3300
3301 if (!cause_return) {
3302 Label no_adjust;
3303 // If our stashed return pc was modified by the runtime we avoid touching it
3304 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP);
3305 __ cmpd(CR0, R0, R31);
3306 __ bne(CR0, no_adjust);
3307
3308 // Adjust return pc forward to step over the safepoint poll instruction
3309 __ addi(R31, R31, 4);
3310 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
3311
3312 __ bind(no_adjust);
3313 }
3314
3315 // Normal exit, restore registers and exit.
3316 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3317 frame_size_in_bytes,
3318 /*restore_ctr=*/true, save_vectors);
3319
3320 __ blr();
3321
3322 // Make sure all code is generated
3323 masm->flush();
3324
3325 // Fill-out other meta info
3326 // CodeBlob frame size is in words.
3327 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
3328 }
3329
3330 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3331 //
3332 // Generate a stub that calls into the vm to find out the proper destination
3333 // of a java call. All the argument registers are live at this point
3334 // but since this is generic code we don't know what they are and the caller
3335 // must do any gc of the args.
3336 //
3337 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
3338 assert(is_resolve_id(id), "expected a resolve stub id");
3339
3340 // allocate space for the code
3341 ResourceMark rm;
3342
3343 const char* name = SharedRuntime::stub_name(id);
3344 CodeBuffer buffer(name, 1000, 512);
3345 MacroAssembler* masm = new MacroAssembler(&buffer);
3346
3347 int frame_size_in_bytes;
3348
3349 OopMapSet *oop_maps = new OopMapSet();
3350 OopMap* map = nullptr;
3351
3352 address start = __ pc();
3353
3354 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3355 &frame_size_in_bytes,
3356 /*generate_oop_map*/ true,
3357 RegisterSaver::return_pc_is_lr);
3358
3359 // Use noreg as last_Java_pc, the return pc will be reconstructed
3360 // from the physical frame.
3361 __ set_last_Java_frame(/*sp*/R1_SP, noreg);
3362
3363 int frame_complete = __ offset();
3364
3365 // Pass R19_method as 2nd (optional) argument, used by
3366 // counter_overflow_stub.
3367 __ call_VM_leaf(destination, R16_thread, R19_method);
3368 address calls_return_pc = __ last_calls_return_pc();
3369 // Set an oopmap for the call site.
3370 // We need this not only for callee-saved registers, but also for volatile
3371 // registers that the compiler might be keeping live across a safepoint.
3372 // Create the oopmap for the call's return pc.
3373 oop_maps->add_gc_map(calls_return_pc - start, map);
3374
3375 // R3_RET contains the address we are going to jump to assuming no exception got installed.
3376
3377 // clear last_Java_sp
3378 __ reset_last_Java_frame();
3379
3380 // Check for pending exceptions.
3381 BLOCK_COMMENT("Check for pending exceptions.");
3382 Label pending;
3383 __ ld(R11_scratch1, thread_(pending_exception));
3384 __ cmpdi(CR0, R11_scratch1, 0);
3385 __ bne(CR0, pending);
3386
3387 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
3388
3389 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
3390
3391 // Get the returned method.
3392 __ get_vm_result_metadata(R19_method);
3393
3394 __ bctr();
3395
3396
3397 // Pending exception after the safepoint.
3398 __ BIND(pending);
3399
3400 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);
3401
3402 // exception pending => remove activation and forward to exception handler
3403
3404 __ li(R11_scratch1, 0);
3405 __ ld(R3_ARG1, thread_(pending_exception));
3406 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_oop_offset()), R16_thread);
3407 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3408
3409 // -------------
3410 // Make sure all code is generated.
3411 masm->flush();
3412
3413 // return the blob
3414 // frame_size_words or bytes??
3415 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
3416 oop_maps, true);
3417 }
3418
3419 // Continuation point for throwing of implicit exceptions that are
3420 // not handled in the current activation. Fabricates an exception
3421 // oop and initiates normal exception dispatching in this
3422 // frame. Only callee-saved registers are preserved (through the
3423 // normal register window / RegisterMap handling). If the compiler
3424 // needs all registers to be preserved between the fault point and
3425 // the exception handler then it must assume responsibility for that
3426 // in AbstractCompiler::continuation_for_implicit_null_exception or
3427 // continuation_for_implicit_division_by_zero_exception. All other
3428 // implicit exceptions (e.g., NullPointerException or
3429 // AbstractMethodError on entry) are either at call sites or
3430 // otherwise assume that stack unwinding will be initiated, so
3431 // caller saved registers were assumed volatile in the compiler.
3432 //
3433 // Note that we generate only this stub into a RuntimeStub, because
3434 // it needs to be properly traversed and ignored during GC, so we
3435 // change the meaning of the "__" macro within this method.
3436 //
3437 // Note: the routine set_pc_not_at_call_for_caller in
3438 // SharedRuntime.cpp requires that this code be generated into a
3439 // RuntimeStub.
3440 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
3441 assert(is_throw_id(id), "expected a throw stub id");
3442
3443 const char* name = SharedRuntime::stub_name(id);
3444
3445 ResourceMark rm;
3446 const char* timer_msg = "SharedRuntime generate_throw_exception";
3447 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
3448
3449 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
3450 MacroAssembler* masm = new MacroAssembler(&code);
3451
3452 OopMapSet* oop_maps = new OopMapSet();
3453 int frame_size_in_bytes = frame::native_abi_reg_args_size;
3454 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
3455
3456 address start = __ pc();
3457
3458 __ save_LR(R11_scratch1);
3459
3460 // Push a frame.
3461 __ push_frame_reg_args(0, R11_scratch1);
3462
3463 address frame_complete_pc = __ pc();
3464
3465 // Note that we always have a runtime stub frame on the top of
3466 // stack by this point. Remember the offset of the instruction
3467 // whose address will be moved to R11_scratch1.
3468 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
3469
3470 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
3471
3472 __ mr(R3_ARG1, R16_thread);
3473 __ call_c(runtime_entry);
3474
3475 // Set an oopmap for the call site.
3476 oop_maps->add_gc_map((int)(gc_map_pc - start), map);
3477
3478 __ reset_last_Java_frame();
3479
3480 #ifdef ASSERT
3481 // Make sure that this code is only executed if there is a pending
3482 // exception.
3483 {
3484 Label L;
3485 __ ld(R0,
3486 in_bytes(Thread::pending_exception_offset()),
3487 R16_thread);
3488 __ cmpdi(CR0, R0, 0);
3489 __ bne(CR0, L);
3490 __ stop("SharedRuntime::throw_exception: no pending exception");
3491 __ bind(L);
3492 }
3493 #endif
3494
3495 // Pop frame.
3496 __ pop_frame();
3497
3498 __ restore_LR(R11_scratch1);
3499
3500 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
3501 __ mtctr(R11_scratch1);
3502 __ bctr();
3503
3504 // Create runtime stub with OopMap.
3505 RuntimeStub* stub =
3506 RuntimeStub::new_runtime_stub(name, &code,
3507 /*frame_complete=*/ (int)(frame_complete_pc - start),
3508 frame_size_in_bytes/wordSize,
3509 oop_maps,
3510 false);
3511 return stub;
3512 }
3513
3514 //------------------------------Montgomery multiplication------------------------
3515 //
3516
3517 // Subtract 0:b from carry:a. Return carry.
3518 static unsigned long
3519 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3520 long i = 0;
3521 unsigned long tmp, tmp2;
3522 __asm__ __volatile__ (
3523 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA
3524 "mtctr %[len] \n"
3525 "0: \n"
3526 "ldx %[tmp], %[i], %[a] \n"
3527 "ldx %[tmp2], %[i], %[b] \n"
3528 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended
3529 "stdx %[tmp], %[i], %[a] \n"
3530 "addi %[i], %[i], 8 \n"
3531 "bdnz 0b \n"
3532 "addme %[tmp], %[carry] \n" // carry + CA - 1
3533 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
3534 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
3535 : "ctr", "xer", "memory"
3536 );
3537 return tmp;
3538 }
3539
3540 // Multiply (unsigned) Long A by Long B, accumulating the double-
3541 // length result into the accumulator formed of T0, T1, and T2.
3542 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
3543 unsigned long hi, lo;
3544 __asm__ __volatile__ (
3545 "mulld %[lo], %[A], %[B] \n"
3546 "mulhdu %[hi], %[A], %[B] \n"
3547 "addc %[T0], %[T0], %[lo] \n"
3548 "adde %[T1], %[T1], %[hi] \n"
3549 "addze %[T2], %[T2] \n"
3550 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
3551 : [A]"r"(A), [B]"r"(B)
3552 : "xer"
3553 );
3554 }
3555
3556 // As above, but add twice the double-length result into the
3557 // accumulator.
3558 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
3559 unsigned long hi, lo;
3560 __asm__ __volatile__ (
3561 "mulld %[lo], %[A], %[B] \n"
3562 "mulhdu %[hi], %[A], %[B] \n"
3563 "addc %[T0], %[T0], %[lo] \n"
3564 "adde %[T1], %[T1], %[hi] \n"
3565 "addze %[T2], %[T2] \n"
3566 "addc %[T0], %[T0], %[lo] \n"
3567 "adde %[T1], %[T1], %[hi] \n"
3568 "addze %[T2], %[T2] \n"
3569 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
3570 : [A]"r"(A), [B]"r"(B)
3571 : "xer"
3572 );
3573 }
3574
3575 // Fast Montgomery multiplication. The derivation of the algorithm is
3576 // in "A Cryptographic Library for the Motorola DSP56000,
3577 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237".
3578 static void
3579 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
3580 unsigned long m[], unsigned long inv, int len) {
3581 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3582 int i;
3583
3584 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3585
3586 for (i = 0; i < len; i++) {
3587 int j;
3588 for (j = 0; j < i; j++) {
3589 MACC(a[j], b[i-j], t0, t1, t2);
3590 MACC(m[j], n[i-j], t0, t1, t2);
3591 }
3592 MACC(a[i], b[0], t0, t1, t2);
3593 m[i] = t0 * inv;
3594 MACC(m[i], n[0], t0, t1, t2);
3595
3596 assert(t0 == 0, "broken Montgomery multiply");
3597
3598 t0 = t1; t1 = t2; t2 = 0;
3599 }
3600
3601 for (i = len; i < 2*len; i++) {
3602 int j;
3603 for (j = i-len+1; j < len; j++) {
3604 MACC(a[j], b[i-j], t0, t1, t2);
3605 MACC(m[j], n[i-j], t0, t1, t2);
3606 }
3607 m[i-len] = t0;
3608 t0 = t1; t1 = t2; t2 = 0;
3609 }
3610
3611 while (t0) {
3612 t0 = sub(m, n, t0, len);
3613 }
3614 }
3615
3616 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3617 // multiplies so it should be up to 25% faster than Montgomery
3618 // multiplication. However, its loop control is more complex and it
3619 // may actually run slower on some machines.
3620 static void
3621 montgomery_square(unsigned long a[], unsigned long n[],
3622 unsigned long m[], unsigned long inv, int len) {
3623 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3624 int i;
3625
3626 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3627
3628 for (i = 0; i < len; i++) {
3629 int j;
3630 int end = (i+1)/2;
3631 for (j = 0; j < end; j++) {
3632 MACC2(a[j], a[i-j], t0, t1, t2);
3633 MACC(m[j], n[i-j], t0, t1, t2);
3634 }
3635 if ((i & 1) == 0) {
3636 MACC(a[j], a[j], t0, t1, t2);
3637 }
3638 for (; j < i; j++) {
3639 MACC(m[j], n[i-j], t0, t1, t2);
3640 }
3641 m[i] = t0 * inv;
3642 MACC(m[i], n[0], t0, t1, t2);
3643
3644 assert(t0 == 0, "broken Montgomery square");
3645
3646 t0 = t1; t1 = t2; t2 = 0;
3647 }
3648
3649 for (i = len; i < 2*len; i++) {
3650 int start = i-len+1;
3651 int end = start + (len - start)/2;
3652 int j;
3653 for (j = start; j < end; j++) {
3654 MACC2(a[j], a[i-j], t0, t1, t2);
3655 MACC(m[j], n[i-j], t0, t1, t2);
3656 }
3657 if ((i & 1) == 0) {
3658 MACC(a[j], a[j], t0, t1, t2);
3659 }
3660 for (; j < len; j++) {
3661 MACC(m[j], n[i-j], t0, t1, t2);
3662 }
3663 m[i-len] = t0;
3664 t0 = t1; t1 = t2; t2 = 0;
3665 }
3666
3667 while (t0) {
3668 t0 = sub(m, n, t0, len);
3669 }
3670 }
3671
3672 // The threshold at which squaring is advantageous was determined
3673 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3674 // Doesn't seem to be relevant for Power8 so we use the same value.
3675 #define MONTGOMERY_SQUARING_THRESHOLD 64
3676
3677 // Copy len longwords from s to d, word-swapping as we go. The
3678 // destination array is reversed.
3679 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
3680 d += len;
3681 while(len-- > 0) {
3682 d--;
3683 unsigned long s_val = *s;
3684 // Swap words in a longword on little endian machines.
3685 #ifdef VM_LITTLE_ENDIAN
3686 s_val = (s_val << 32) | (s_val >> 32);
3687 #endif
3688 *d = s_val;
3689 s++;
3690 }
3691 }
3692
3693 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3694 jint len, jlong inv,
3695 jint *m_ints) {
3696 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3697 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3698 int longwords = len/2;
3699
3700 // Make very sure we don't use so much space that the stack might
3701 // overflow. 512 jints corresponds to an 16384-bit integer and
3702 // will use here a total of 8k bytes of stack space.
3703 int divisor = sizeof(unsigned long) * 4;
3704 guarantee(longwords <= 8192 / divisor, "must be");
3705 int total_allocation = longwords * sizeof (unsigned long) * 4;
3706 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3707
3708 // Local scratch arrays
3709 unsigned long
3710 *a = scratch + 0 * longwords,
3711 *b = scratch + 1 * longwords,
3712 *n = scratch + 2 * longwords,
3713 *m = scratch + 3 * longwords;
3714
3715 reverse_words((unsigned long *)a_ints, a, longwords);
3716 reverse_words((unsigned long *)b_ints, b, longwords);
3717 reverse_words((unsigned long *)n_ints, n, longwords);
3718
3719 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
3720
3721 reverse_words(m, (unsigned long *)m_ints, longwords);
3722 }
3723
3724 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3725 jint len, jlong inv,
3726 jint *m_ints) {
3727 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3728 assert(len % 2 == 0, "array length in montgomery_square must be even");
3729 int longwords = len/2;
3730
3731 // Make very sure we don't use so much space that the stack might
3732 // overflow. 512 jints corresponds to an 16384-bit integer and
3733 // will use here a total of 6k bytes of stack space.
3734 int divisor = sizeof(unsigned long) * 3;
3735 guarantee(longwords <= (8192 / divisor), "must be");
3736 int total_allocation = longwords * sizeof (unsigned long) * 3;
3737 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3738
3739 // Local scratch arrays
3740 unsigned long
3741 *a = scratch + 0 * longwords,
3742 *n = scratch + 1 * longwords,
3743 *m = scratch + 2 * longwords;
3744
3745 reverse_words((unsigned long *)a_ints, a, longwords);
3746 reverse_words((unsigned long *)n_ints, n, longwords);
3747
3748 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3749 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
3750 } else {
3751 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
3752 }
3753
3754 reverse_words(m, (unsigned long *)m_ints, longwords);
3755 }
3756
3757 #if INCLUDE_JFR
3758
3759 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3760 // It returns a jobject handle to the event writer.
3761 // The handle is dereferenced and the return value is the event writer oop.
3762 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3763 const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
3764 CodeBuffer code(name, 512, 64);
3765 MacroAssembler* masm = new MacroAssembler(&code);
3766
3767 Register tmp1 = R10_ARG8;
3768 Register tmp2 = R9_ARG7;
3769
3770 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
3771 address start = __ pc();
3772 __ mflr(tmp1);
3773 __ std(tmp1, _abi0(lr), R1_SP); // save return pc
3774 __ push_frame_reg_args(0, tmp1);
3775 int frame_complete = __ pc() - start;
3776 __ set_last_Java_frame(R1_SP, noreg);
3777 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread);
3778 address calls_return_pc = __ last_calls_return_pc();
3779 __ reset_last_Java_frame();
3780 // The handle is dereferenced through a load barrier.
3781 __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
3782 __ pop_frame();
3783 __ ld(tmp1, _abi0(lr), R1_SP);
3784 __ mtlr(tmp1);
3785 __ blr();
3786
3787 OopMapSet* oop_maps = new OopMapSet();
3788 OopMap* map = new OopMap(framesize, 0);
3789 oop_maps->add_gc_map(calls_return_pc - start, map);
3790
3791 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3792 RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3793 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3794 oop_maps, false);
3795 return stub;
3796 }
3797
3798 // For c2: call to return a leased buffer.
3799 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3800 const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
3801 CodeBuffer code(name, 512, 64);
3802 MacroAssembler* masm = new MacroAssembler(&code);
3803
3804 Register tmp1 = R10_ARG8;
3805 Register tmp2 = R9_ARG7;
3806
3807 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
3808 address start = __ pc();
3809 __ mflr(tmp1);
3810 __ std(tmp1, _abi0(lr), R1_SP); // save return pc
3811 __ push_frame_reg_args(0, tmp1);
3812 int frame_complete = __ pc() - start;
3813 __ set_last_Java_frame(R1_SP, noreg);
3814 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread);
3815 address calls_return_pc = __ last_calls_return_pc();
3816 __ reset_last_Java_frame();
3817 __ pop_frame();
3818 __ ld(tmp1, _abi0(lr), R1_SP);
3819 __ mtlr(tmp1);
3820 __ blr();
3821
3822 OopMapSet* oop_maps = new OopMapSet();
3823 OopMap* map = new OopMap(framesize, 0);
3824 oop_maps->add_gc_map(calls_return_pc - start, map);
3825
3826 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3827 RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3828 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3829 oop_maps, false);
3830 return stub;
3831 }
3832 #endif // INCLUDE_JFR
3833
3834 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
3835 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
3836
3837 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
3838 Unimplemented();
3839 return 0;
3840 }
3841
3842 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3843 Unimplemented();
3844 return nullptr;
3845 }
3846