1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "code/debugInfoRec.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/vtableStubs.hpp"
30 #include "frame_ppc.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/interp_masm.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "prims/methodHandles.hpp"
38 #include "runtime/continuation.hpp"
39 #include "runtime/continuationEntry.inline.hpp"
40 #include "runtime/jniHandles.hpp"
41 #include "runtime/os.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/signature.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "runtime/timerTrace.hpp"
47 #include "runtime/vframeArray.hpp"
48 #include "utilities/align.hpp"
49 #include "utilities/macros.hpp"
50 #include "vmreg_ppc.inline.hpp"
51 #ifdef COMPILER1
52 #include "c1/c1_Runtime1.hpp"
53 #endif
54 #ifdef COMPILER2
55 #include "opto/ad.hpp"
56 #include "opto/runtime.hpp"
57 #endif
58
59 #include <alloca.h>
60
61 #define __ masm->
62
63 #ifdef PRODUCT
64 #define BLOCK_COMMENT(str) // nothing
65 #else
66 #define BLOCK_COMMENT(str) __ block_comment(str)
67 #endif
68
69 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
70
71
72 class RegisterSaver {
73 // Used for saving volatile registers.
74 public:
75
76 // Support different return pc locations.
77 enum ReturnPCLocation {
78 return_pc_is_lr,
79 return_pc_is_pre_saved,
80 return_pc_is_thread_saved_exception_pc
81 };
82
83 static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
84 int* out_frame_size_in_bytes,
85 bool generate_oop_map,
86 ReturnPCLocation return_pc_location,
87 bool save_vectors = false);
88 static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
89 int frame_size_in_bytes,
90 bool restore_ctr,
91 bool save_vectors = false);
92
93 static void push_frame_and_save_argument_registers(MacroAssembler* masm,
94 Register r_temp,
95 int frame_size,
96 int total_args,
97 const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
98 static void restore_argument_registers_and_pop_frame(MacroAssembler*masm,
99 int frame_size,
100 int total_args,
101 const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
102
103 // During deoptimization only the result registers need to be restored
104 // all the other values have already been extracted.
105 static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors);
106
107 // Constants and data structures:
108
109 typedef enum {
110 int_reg,
111 float_reg,
112 special_reg,
113 vec_reg
114 } RegisterType;
115
116 typedef enum {
117 reg_size = 8,
118 half_reg_size = reg_size / 2,
119 vec_reg_size = 16
120 } RegisterConstants;
121
122 typedef struct {
123 RegisterType reg_type;
124 int reg_num;
125 VMReg vmreg;
126 } LiveRegType;
127 };
128
129
130 #define RegisterSaver_LiveIntReg(regname) \
131 { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() }
132
133 #define RegisterSaver_LiveFloatReg(regname) \
134 { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() }
135
136 #define RegisterSaver_LiveSpecialReg(regname) \
137 { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }
138
139 #define RegisterSaver_LiveVecReg(regname) \
140 { RegisterSaver::vec_reg, regname->encoding(), regname->as_VMReg() }
141
142 static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
143 // Live registers which get spilled to the stack. Register
144 // positions in this array correspond directly to the stack layout.
145
146 //
147 // live special registers:
148 //
149 RegisterSaver_LiveSpecialReg(SR_CTR),
150 //
151 // live float registers:
152 //
153 RegisterSaver_LiveFloatReg( F0 ),
154 RegisterSaver_LiveFloatReg( F1 ),
155 RegisterSaver_LiveFloatReg( F2 ),
156 RegisterSaver_LiveFloatReg( F3 ),
157 RegisterSaver_LiveFloatReg( F4 ),
158 RegisterSaver_LiveFloatReg( F5 ),
159 RegisterSaver_LiveFloatReg( F6 ),
160 RegisterSaver_LiveFloatReg( F7 ),
161 RegisterSaver_LiveFloatReg( F8 ),
162 RegisterSaver_LiveFloatReg( F9 ),
163 RegisterSaver_LiveFloatReg( F10 ),
164 RegisterSaver_LiveFloatReg( F11 ),
165 RegisterSaver_LiveFloatReg( F12 ),
166 RegisterSaver_LiveFloatReg( F13 ),
167 RegisterSaver_LiveFloatReg( F14 ),
168 RegisterSaver_LiveFloatReg( F15 ),
169 RegisterSaver_LiveFloatReg( F16 ),
170 RegisterSaver_LiveFloatReg( F17 ),
171 RegisterSaver_LiveFloatReg( F18 ),
172 RegisterSaver_LiveFloatReg( F19 ),
173 RegisterSaver_LiveFloatReg( F20 ),
174 RegisterSaver_LiveFloatReg( F21 ),
175 RegisterSaver_LiveFloatReg( F22 ),
176 RegisterSaver_LiveFloatReg( F23 ),
177 RegisterSaver_LiveFloatReg( F24 ),
178 RegisterSaver_LiveFloatReg( F25 ),
179 RegisterSaver_LiveFloatReg( F26 ),
180 RegisterSaver_LiveFloatReg( F27 ),
181 RegisterSaver_LiveFloatReg( F28 ),
182 RegisterSaver_LiveFloatReg( F29 ),
183 RegisterSaver_LiveFloatReg( F30 ),
184 RegisterSaver_LiveFloatReg( F31 ),
185 //
186 // live integer registers:
187 //
188 RegisterSaver_LiveIntReg( R0 ),
189 //RegisterSaver_LiveIntReg( R1 ), // stack pointer
190 RegisterSaver_LiveIntReg( R2 ),
191 RegisterSaver_LiveIntReg( R3 ),
192 RegisterSaver_LiveIntReg( R4 ),
193 RegisterSaver_LiveIntReg( R5 ),
194 RegisterSaver_LiveIntReg( R6 ),
195 RegisterSaver_LiveIntReg( R7 ),
196 RegisterSaver_LiveIntReg( R8 ),
197 RegisterSaver_LiveIntReg( R9 ),
198 RegisterSaver_LiveIntReg( R10 ),
199 RegisterSaver_LiveIntReg( R11 ),
200 RegisterSaver_LiveIntReg( R12 ),
201 //RegisterSaver_LiveIntReg( R13 ), // system thread id
202 RegisterSaver_LiveIntReg( R14 ),
203 RegisterSaver_LiveIntReg( R15 ),
204 RegisterSaver_LiveIntReg( R16 ),
205 RegisterSaver_LiveIntReg( R17 ),
206 RegisterSaver_LiveIntReg( R18 ),
207 RegisterSaver_LiveIntReg( R19 ),
208 RegisterSaver_LiveIntReg( R20 ),
209 RegisterSaver_LiveIntReg( R21 ),
210 RegisterSaver_LiveIntReg( R22 ),
211 RegisterSaver_LiveIntReg( R23 ),
212 RegisterSaver_LiveIntReg( R24 ),
213 RegisterSaver_LiveIntReg( R25 ),
214 RegisterSaver_LiveIntReg( R26 ),
215 RegisterSaver_LiveIntReg( R27 ),
216 RegisterSaver_LiveIntReg( R28 ),
217 RegisterSaver_LiveIntReg( R29 ),
218 RegisterSaver_LiveIntReg( R30 ),
219 RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below)
220 };
221
222 static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
223 //
224 // live vector registers (optional, only these ones are used by C2):
225 //
226 RegisterSaver_LiveVecReg( VR0 ),
227 RegisterSaver_LiveVecReg( VR1 ),
228 RegisterSaver_LiveVecReg( VR2 ),
229 RegisterSaver_LiveVecReg( VR3 ),
230 RegisterSaver_LiveVecReg( VR4 ),
231 RegisterSaver_LiveVecReg( VR5 ),
232 RegisterSaver_LiveVecReg( VR6 ),
233 RegisterSaver_LiveVecReg( VR7 ),
234 RegisterSaver_LiveVecReg( VR8 ),
235 RegisterSaver_LiveVecReg( VR9 ),
236 RegisterSaver_LiveVecReg( VR10 ),
237 RegisterSaver_LiveVecReg( VR11 ),
238 RegisterSaver_LiveVecReg( VR12 ),
239 RegisterSaver_LiveVecReg( VR13 ),
240 RegisterSaver_LiveVecReg( VR14 ),
241 RegisterSaver_LiveVecReg( VR15 ),
242 RegisterSaver_LiveVecReg( VR16 ),
243 RegisterSaver_LiveVecReg( VR17 ),
244 RegisterSaver_LiveVecReg( VR18 ),
245 RegisterSaver_LiveVecReg( VR19 ),
246 RegisterSaver_LiveVecReg( VR20 ),
247 RegisterSaver_LiveVecReg( VR21 ),
248 RegisterSaver_LiveVecReg( VR22 ),
249 RegisterSaver_LiveVecReg( VR23 ),
250 RegisterSaver_LiveVecReg( VR24 ),
251 RegisterSaver_LiveVecReg( VR25 ),
252 RegisterSaver_LiveVecReg( VR26 ),
253 RegisterSaver_LiveVecReg( VR27 ),
254 RegisterSaver_LiveVecReg( VR28 ),
255 RegisterSaver_LiveVecReg( VR29 ),
256 RegisterSaver_LiveVecReg( VR30 ),
257 RegisterSaver_LiveVecReg( VR31 )
258 };
259
260
261 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
262 int* out_frame_size_in_bytes,
263 bool generate_oop_map,
264 ReturnPCLocation return_pc_location,
265 bool save_vectors) {
266 // Push an abi_reg_args-frame and store all registers which may be live.
267 // If requested, create an OopMap: Record volatile registers as
268 // callee-save values in an OopMap so their save locations will be
269 // propagated to the RegisterMap of the caller frame during
270 // StackFrameStream construction (needed for deoptimization; see
271 // compiledVFrame::create_stack_value).
272 // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
273
274 // calculate frame size
275 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
276 sizeof(RegisterSaver::LiveRegType);
277 const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
278 sizeof(RegisterSaver::LiveRegType))
279 : 0;
280 const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
281 const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
282 + frame::native_abi_reg_args_size;
283
284 *out_frame_size_in_bytes = frame_size_in_bytes;
285 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
286 const int register_save_offset = frame_size_in_bytes - register_save_size;
287
288 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
289 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr;
290
291 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
292
293 // push a new frame
294 __ push_frame(frame_size_in_bytes, noreg);
295
296 // Save some registers in the last (non-vector) slots of the new frame so we
297 // can use them as scratch regs or to determine the return pc.
298 __ std(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
299 __ std(R30, frame_size_in_bytes - 2*reg_size - vecregstosave_num * vec_reg_size, R1_SP);
300
301 // save the flags
302 // Do the save_LR by hand and adjust the return pc if requested.
303 switch (return_pc_location) {
304 case return_pc_is_lr: __ mflr(R31); break;
305 case return_pc_is_pre_saved: break;
306 case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
307 default: ShouldNotReachHere();
308 }
309 if (return_pc_location != return_pc_is_pre_saved) {
310 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
311 }
312
313 // save all registers (ints and floats)
314 int offset = register_save_offset;
315
316 for (int i = 0; i < regstosave_num; i++) {
317 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
318 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
319
320 switch (reg_type) {
321 case RegisterSaver::int_reg: {
322 if (reg_num < 30) { // We spilled R30-31 right at the beginning.
323 __ std(as_Register(reg_num), offset, R1_SP);
324 }
325 break;
326 }
327 case RegisterSaver::float_reg: {
328 __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
329 break;
330 }
331 case RegisterSaver::special_reg: {
332 if (reg_num == SR_CTR.encoding()) {
333 __ mfctr(R30);
334 __ std(R30, offset, R1_SP);
335 } else {
336 Unimplemented();
337 }
338 break;
339 }
340 default:
341 ShouldNotReachHere();
342 }
343
344 if (generate_oop_map) {
345 map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
346 RegisterSaver_LiveRegs[i].vmreg);
347 }
348 offset += reg_size;
349 }
350
351 // Note that generate_oop_map in the following loop is only used for the
352 // polling_page_vectors_safepoint_handler_blob and the deopt_blob.
353 // The order in which the vector contents are stored depends on Endianess and
354 // the utilized instructions (PowerArchitecturePPC64).
355 assert(is_aligned(offset, StackAlignmentInBytes), "should be");
356 if (PowerArchitecturePPC64 >= 10) {
357 assert(is_even(vecregstosave_num), "expectation");
358 for (int i = 0; i < vecregstosave_num; i += 2) {
359 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
360 assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
361
362 __ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
363 // Note: The contents were read in the same order (see loadV16 node in ppc.ad).
364 // RegisterMap::pd_location only uses the first VMReg for each VectorRegister.
365 if (generate_oop_map) {
366 map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
367 RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
368 map->set_callee_saved(VMRegImpl::stack2reg((offset + vec_reg_size) >> 2),
369 RegisterSaver_LiveVecRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
370 }
371 offset += (2 * vec_reg_size);
372 }
373 } else {
374 for (int i = 0; i < vecregstosave_num; i++) {
375 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
376
377 __ stxv(as_VectorRegister(reg_num)->to_vsr(), offset, R1_SP);
378 // Note: The contents were read in the same order (see loadV16 node in ppc.ad).
379 // RegisterMap::pd_location only uses the first VMReg for each VectorRegister.
380 if (generate_oop_map) {
381 VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg;
382 map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr);
383 }
384 offset += vec_reg_size;
385 }
386 }
387
388 assert(offset == frame_size_in_bytes, "consistency check");
389
390 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
391
392 // And we're done.
393 return map;
394 }
395
396
397 // Pop the current frame and restore all the registers that we
398 // saved.
399 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
400 int frame_size_in_bytes,
401 bool restore_ctr,
402 bool save_vectors) {
403 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
404 sizeof(RegisterSaver::LiveRegType);
405 const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
406 sizeof(RegisterSaver::LiveRegType))
407 : 0;
408 const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
409
410 const int register_save_offset = frame_size_in_bytes - register_save_size;
411
412 BLOCK_COMMENT("restore_live_registers_and_pop_frame {");
413
414 // restore all registers (ints and floats)
415 int offset = register_save_offset;
416
417 for (int i = 0; i < regstosave_num; i++) {
418 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
419 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
420
421 switch (reg_type) {
422 case RegisterSaver::int_reg: {
423 if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
424 __ ld(as_Register(reg_num), offset, R1_SP);
425 break;
426 }
427 case RegisterSaver::float_reg: {
428 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
429 break;
430 }
431 case RegisterSaver::special_reg: {
432 if (reg_num == SR_CTR.encoding()) {
433 if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
434 __ ld(R31, offset, R1_SP);
435 __ mtctr(R31);
436 }
437 } else {
438 Unimplemented();
439 }
440 break;
441 }
442 default:
443 ShouldNotReachHere();
444 }
445 offset += reg_size;
446 }
447
448 assert(is_aligned(offset, StackAlignmentInBytes), "should be");
449 if (PowerArchitecturePPC64 >= 10) {
450 for (int i = 0; i < vecregstosave_num; i += 2) {
451 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
452 assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
453
454 __ lxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
455
456 offset += (2 * vec_reg_size);
457 }
458 } else {
459 for (int i = 0; i < vecregstosave_num; i++) {
460 int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
461
462 __ lxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
463
464 offset += vec_reg_size;
465 }
466 }
467
468 assert(offset == frame_size_in_bytes, "consistency check");
469
470 // restore link and the flags
471 __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
472 __ mtlr(R31);
473
474 // restore scratch register's value
475 __ ld(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
476
477 // pop the frame
478 __ addi(R1_SP, R1_SP, frame_size_in_bytes);
479
480 BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
481 }
482
483 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
484 int frame_size,int total_args, const VMRegPair *regs,
485 const VMRegPair *regs2) {
486 __ push_frame(frame_size, r_temp);
487 int st_off = frame_size - wordSize;
488 for (int i = 0; i < total_args; i++) {
489 VMReg r_1 = regs[i].first();
490 VMReg r_2 = regs[i].second();
491 if (!r_1->is_valid()) {
492 assert(!r_2->is_valid(), "");
493 continue;
494 }
495 if (r_1->is_Register()) {
496 Register r = r_1->as_Register();
497 __ std(r, st_off, R1_SP);
498 st_off -= wordSize;
499 } else if (r_1->is_FloatRegister()) {
500 FloatRegister f = r_1->as_FloatRegister();
501 __ stfd(f, st_off, R1_SP);
502 st_off -= wordSize;
503 }
504 }
505 if (regs2 != nullptr) {
506 for (int i = 0; i < total_args; i++) {
507 VMReg r_1 = regs2[i].first();
508 VMReg r_2 = regs2[i].second();
509 if (!r_1->is_valid()) {
510 assert(!r_2->is_valid(), "");
511 continue;
512 }
513 if (r_1->is_Register()) {
514 Register r = r_1->as_Register();
515 __ std(r, st_off, R1_SP);
516 st_off -= wordSize;
517 } else if (r_1->is_FloatRegister()) {
518 FloatRegister f = r_1->as_FloatRegister();
519 __ stfd(f, st_off, R1_SP);
520 st_off -= wordSize;
521 }
522 }
523 }
524 }
525
526 void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm, int frame_size,
527 int total_args, const VMRegPair *regs,
528 const VMRegPair *regs2) {
529 int st_off = frame_size - wordSize;
530 for (int i = 0; i < total_args; i++) {
531 VMReg r_1 = regs[i].first();
532 VMReg r_2 = regs[i].second();
533 if (r_1->is_Register()) {
534 Register r = r_1->as_Register();
535 __ ld(r, st_off, R1_SP);
536 st_off -= wordSize;
537 } else if (r_1->is_FloatRegister()) {
538 FloatRegister f = r_1->as_FloatRegister();
539 __ lfd(f, st_off, R1_SP);
540 st_off -= wordSize;
541 }
542 }
543 if (regs2 != nullptr)
544 for (int i = 0; i < total_args; i++) {
545 VMReg r_1 = regs2[i].first();
546 VMReg r_2 = regs2[i].second();
547 if (r_1->is_Register()) {
548 Register r = r_1->as_Register();
549 __ ld(r, st_off, R1_SP);
550 st_off -= wordSize;
551 } else if (r_1->is_FloatRegister()) {
552 FloatRegister f = r_1->as_FloatRegister();
553 __ lfd(f, st_off, R1_SP);
554 st_off -= wordSize;
555 }
556 }
557 __ pop_frame();
558 }
559
560 // Restore the registers that might be holding a result.
561 void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors) {
562 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
563 sizeof(RegisterSaver::LiveRegType);
564 const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
565 sizeof(RegisterSaver::LiveRegType))
566 : 0;
567 const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
568
569 const int register_save_offset = frame_size_in_bytes - register_save_size;
570
571 // restore all result registers (ints and floats)
572 int offset = register_save_offset;
573 for (int i = 0; i < regstosave_num; i++) {
574 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
575 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
576 switch (reg_type) {
577 case RegisterSaver::int_reg: {
578 if (as_Register(reg_num)==R3_RET) // int result_reg
579 __ ld(as_Register(reg_num), offset, R1_SP);
580 break;
581 }
582 case RegisterSaver::float_reg: {
583 if (as_FloatRegister(reg_num)==F1_RET) // float result_reg
584 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
585 break;
586 }
587 case RegisterSaver::special_reg: {
588 // Special registers don't hold a result.
589 break;
590 }
591 default:
592 ShouldNotReachHere();
593 }
594 offset += reg_size;
595 }
596
597 assert(offset == frame_size_in_bytes - (save_vectors ? vecregstosave_num * vec_reg_size : 0), "consistency check");
598 }
599
600 // Is vector's size (in bytes) bigger than a size saved by default?
601 bool SharedRuntime::is_wide_vector(int size) {
602 // Note, MaxVectorSize == 8/16 on PPC64.
603 assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
604 return size > 8;
605 }
606
607 static int reg2slot(VMReg r) {
608 return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
609 }
610
611 static int reg2offset(VMReg r) {
612 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
613 }
614
615 // ---------------------------------------------------------------------------
616 // Read the array of BasicTypes from a signature, and compute where the
617 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
618 // quantities. Values less than VMRegImpl::stack0 are registers, those above
619 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
620 // as framesizes are fixed.
621 // VMRegImpl::stack0 refers to the first slot 0(sp).
622 // and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. Register
623 // up to Register::number_of_registers) are the 64-bit
624 // integer registers.
625
626 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
627 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
628 // units regardless of build. Of course for i486 there is no 64 bit build
629
630 // The Java calling convention is a "shifted" version of the C ABI.
631 // By skipping the first C ABI register we can call non-static jni methods
632 // with small numbers of arguments without having to shuffle the arguments
633 // at all. Since we control the java ABI we ought to at least get some
634 // advantage out of it.
635
636 const VMReg java_iarg_reg[8] = {
637 R3->as_VMReg(),
638 R4->as_VMReg(),
639 R5->as_VMReg(),
640 R6->as_VMReg(),
641 R7->as_VMReg(),
642 R8->as_VMReg(),
643 R9->as_VMReg(),
644 R10->as_VMReg()
645 };
646
647 const VMReg java_farg_reg[13] = {
648 F1->as_VMReg(),
649 F2->as_VMReg(),
650 F3->as_VMReg(),
651 F4->as_VMReg(),
652 F5->as_VMReg(),
653 F6->as_VMReg(),
654 F7->as_VMReg(),
655 F8->as_VMReg(),
656 F9->as_VMReg(),
657 F10->as_VMReg(),
658 F11->as_VMReg(),
659 F12->as_VMReg(),
660 F13->as_VMReg()
661 };
662
663 const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
664 const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
665
666 STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j);
667 STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j);
668
669 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
670 VMRegPair *regs,
671 int total_args_passed) {
672 // C2c calling conventions for compiled-compiled calls.
673 // Put 8 ints/longs into registers _AND_ 13 float/doubles into
674 // registers _AND_ put the rest on the stack.
675
676 const int inc_stk_for_intfloat = 1; // 1 slots for ints and floats
677 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
678
679 int i;
680 VMReg reg;
681 int stk = 0;
682 int ireg = 0;
683 int freg = 0;
684
685 // We put the first 8 arguments into registers and the rest on the
686 // stack, float arguments are already in their argument registers
687 // due to c2c calling conventions (see calling_convention).
688 for (int i = 0; i < total_args_passed; ++i) {
689 switch(sig_bt[i]) {
690 case T_BOOLEAN:
691 case T_CHAR:
692 case T_BYTE:
693 case T_SHORT:
694 case T_INT:
695 if (ireg < num_java_iarg_registers) {
696 // Put int/ptr in register
697 reg = java_iarg_reg[ireg];
698 ++ireg;
699 } else {
700 // Put int/ptr on stack.
701 reg = VMRegImpl::stack2reg(stk);
702 stk += inc_stk_for_intfloat;
703 }
704 regs[i].set1(reg);
705 break;
706 case T_LONG:
707 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
708 if (ireg < num_java_iarg_registers) {
709 // Put long in register.
710 reg = java_iarg_reg[ireg];
711 ++ireg;
712 } else {
713 // Put long on stack. They must be aligned to 2 slots.
714 if (stk & 0x1) ++stk;
715 reg = VMRegImpl::stack2reg(stk);
716 stk += inc_stk_for_longdouble;
717 }
718 regs[i].set2(reg);
719 break;
720 case T_OBJECT:
721 case T_ARRAY:
722 case T_ADDRESS:
723 if (ireg < num_java_iarg_registers) {
724 // Put ptr in register.
725 reg = java_iarg_reg[ireg];
726 ++ireg;
727 } else {
728 // Put ptr on stack. Objects must be aligned to 2 slots too,
729 // because "64-bit pointers record oop-ishness on 2 aligned
730 // adjacent registers." (see OopFlow::build_oop_map).
731 if (stk & 0x1) ++stk;
732 reg = VMRegImpl::stack2reg(stk);
733 stk += inc_stk_for_longdouble;
734 }
735 regs[i].set2(reg);
736 break;
737 case T_FLOAT:
738 if (freg < num_java_farg_registers) {
739 // Put float in register.
740 reg = java_farg_reg[freg];
741 ++freg;
742 } else {
743 // Put float on stack.
744 reg = VMRegImpl::stack2reg(stk);
745 stk += inc_stk_for_intfloat;
746 }
747 regs[i].set1(reg);
748 break;
749 case T_DOUBLE:
750 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
751 if (freg < num_java_farg_registers) {
752 // Put double in register.
753 reg = java_farg_reg[freg];
754 ++freg;
755 } else {
756 // Put double on stack. They must be aligned to 2 slots.
757 if (stk & 0x1) ++stk;
758 reg = VMRegImpl::stack2reg(stk);
759 stk += inc_stk_for_longdouble;
760 }
761 regs[i].set2(reg);
762 break;
763 case T_VOID:
764 // Do not count halves.
765 regs[i].set_bad();
766 break;
767 default:
768 ShouldNotReachHere();
769 }
770 }
771 return stk;
772 }
773
774 // Calling convention for calling C code.
775 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
776 VMRegPair *regs,
777 int total_args_passed) {
778 // Calling conventions for C runtime calls and calls to JNI native methods.
779 //
780 // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
781 // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
782 // the first 13 flt/dbl's in the first 13 fp regs but additionally
783 // copy flt/dbl to the stack if they are beyond the 8th argument.
784
785 const VMReg iarg_reg[8] = {
786 R3->as_VMReg(),
787 R4->as_VMReg(),
788 R5->as_VMReg(),
789 R6->as_VMReg(),
790 R7->as_VMReg(),
791 R8->as_VMReg(),
792 R9->as_VMReg(),
793 R10->as_VMReg()
794 };
795
796 const VMReg farg_reg[13] = {
797 F1->as_VMReg(),
798 F2->as_VMReg(),
799 F3->as_VMReg(),
800 F4->as_VMReg(),
801 F5->as_VMReg(),
802 F6->as_VMReg(),
803 F7->as_VMReg(),
804 F8->as_VMReg(),
805 F9->as_VMReg(),
806 F10->as_VMReg(),
807 F11->as_VMReg(),
808 F12->as_VMReg(),
809 F13->as_VMReg()
810 };
811
812 // Check calling conventions consistency.
813 assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
814 sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
815 "consistency");
816
817 const int additional_frame_header_slots = ((frame::native_abi_minframe_size - frame::jit_out_preserve_size)
818 / VMRegImpl::stack_slot_size);
819 const int float_offset_in_slots = Argument::float_on_stack_offset_in_bytes_c / VMRegImpl::stack_slot_size;
820
821 VMReg reg;
822 int arg = 0;
823 int freg = 0;
824 bool stack_used = false;
825
826 for (int i = 0; i < total_args_passed; ++i, ++arg) {
827 // Each argument corresponds to a slot in the Parameter Save Area (if not omitted)
828 int stk = (arg * 2) + additional_frame_header_slots;
829
830 switch(sig_bt[i]) {
831 //
832 // If arguments 0-7 are integers, they are passed in integer registers.
833 // Argument i is placed in iarg_reg[i].
834 //
835 case T_BOOLEAN:
836 case T_CHAR:
837 case T_BYTE:
838 case T_SHORT:
839 case T_INT:
840 // We must cast ints to longs and use full 64 bit stack slots
841 // here. Thus fall through, handle as long.
842 case T_LONG:
843 case T_OBJECT:
844 case T_ARRAY:
845 case T_ADDRESS:
846 case T_METADATA:
847 // Oops are already boxed if required (JNI).
848 if (arg < Argument::n_int_register_parameters_c) {
849 reg = iarg_reg[arg];
850 } else {
851 reg = VMRegImpl::stack2reg(stk);
852 stack_used = true;
853 }
854 regs[i].set2(reg);
855 break;
856
857 //
858 // Floats are treated differently from int regs: The first 13 float arguments
859 // are passed in registers (not the float args among the first 13 args).
860 // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
861 // in farg_reg[j] if argument i is the j-th float argument of this call.
862 //
863 case T_FLOAT:
864 if (freg < Argument::n_float_register_parameters_c) {
865 // Put float in register ...
866 reg = farg_reg[freg];
867 ++freg;
868 } else {
869 // Put float on stack.
870 reg = VMRegImpl::stack2reg(stk + float_offset_in_slots);
871 stack_used = true;
872 }
873 regs[i].set1(reg);
874 break;
875 case T_DOUBLE:
876 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
877 if (freg < Argument::n_float_register_parameters_c) {
878 // Put double in register ...
879 reg = farg_reg[freg];
880 ++freg;
881 } else {
882 // Put double on stack.
883 reg = VMRegImpl::stack2reg(stk);
884 stack_used = true;
885 }
886 regs[i].set2(reg);
887 break;
888
889 case T_VOID:
890 // Do not count halves.
891 regs[i].set_bad();
892 --arg;
893 break;
894 default:
895 ShouldNotReachHere();
896 }
897 }
898
899 // Return size of the stack frame excluding the jit_out_preserve part in single-word slots.
900 #if defined(ABI_ELFv2)
901 assert(additional_frame_header_slots == 0, "ABIv2 shouldn't use extra slots");
902 // ABIv2 allows omitting the Parameter Save Area if the callee's prototype
903 // indicates that all parameters can be passed in registers.
904 return stack_used ? (arg * 2) : 0;
905 #else
906 // The Parameter Save Area needs to be at least 8 double-word slots for ABIv1.
907 // We have to add extra slots because ABIv1 uses a larger header.
908 return MAX2(arg, 8) * 2 + additional_frame_header_slots;
909 #endif
910 }
911
912 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
913 uint num_bits,
914 uint total_args_passed) {
915 Unimplemented();
916 return 0;
917 }
918
919 static address gen_c2i_adapter(MacroAssembler *masm,
920 int total_args_passed,
921 int comp_args_on_stack,
922 const BasicType *sig_bt,
923 const VMRegPair *regs,
924 Label& call_interpreter,
925 const Register& ientry) {
926
927 address c2i_entrypoint;
928
929 const Register sender_SP = R21_sender_SP; // == R21_tmp1
930 const Register code = R22_tmp2;
931 //const Register ientry = R23_tmp3;
932 const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
933 const int num_value_regs = sizeof(value_regs) / sizeof(Register);
934 int value_regs_index = 0;
935
936 const Register return_pc = R27_tmp7;
937 const Register tmp = R28_tmp8;
938
939 assert_different_registers(sender_SP, code, ientry, return_pc, tmp);
940
941 // Adapter needs TOP_IJAVA_FRAME_ABI.
942 const int adapter_size = frame::top_ijava_frame_abi_size +
943 align_up(total_args_passed * wordSize, frame::alignment_in_bytes);
944
945 // regular (verified) c2i entry point
946 c2i_entrypoint = __ pc();
947
948 // Does compiled code exists? If yes, patch the caller's callsite.
949 __ ld(code, method_(code));
950 __ cmpdi(CR0, code, 0);
951 __ ld(ientry, method_(interpreter_entry)); // preloaded
952 __ beq(CR0, call_interpreter);
953
954
955 // Patch caller's callsite, method_(code) was not null which means that
956 // compiled code exists.
957 __ mflr(return_pc);
958 __ std(return_pc, _abi0(lr), R1_SP);
959 RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);
960
961 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);
962
963 RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
964 __ ld(return_pc, _abi0(lr), R1_SP);
965 __ ld(ientry, method_(interpreter_entry)); // preloaded
966 __ mtlr(return_pc);
967
968
969 // Call the interpreter.
970 __ BIND(call_interpreter);
971 __ mtctr(ientry);
972
973 // Get a copy of the current SP for loading caller's arguments.
974 __ mr(sender_SP, R1_SP);
975
976 // Add space for the adapter.
977 __ resize_frame(-adapter_size, R12_scratch2);
978
979 int st_off = adapter_size - wordSize;
980
981 // Write the args into the outgoing interpreter space.
982 for (int i = 0; i < total_args_passed; i++) {
983 VMReg r_1 = regs[i].first();
984 VMReg r_2 = regs[i].second();
985 if (!r_1->is_valid()) {
986 assert(!r_2->is_valid(), "");
987 continue;
988 }
989 if (r_1->is_stack()) {
990 Register tmp_reg = value_regs[value_regs_index];
991 value_regs_index = (value_regs_index + 1) % num_value_regs;
992 // The calling convention produces OptoRegs that ignore the out
993 // preserve area (JIT's ABI). We must account for it here.
994 int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
995 if (!r_2->is_valid()) {
996 __ lwz(tmp_reg, ld_off, sender_SP);
997 } else {
998 __ ld(tmp_reg, ld_off, sender_SP);
999 }
1000 // Pretend stack targets were loaded into tmp_reg.
1001 r_1 = tmp_reg->as_VMReg();
1002 }
1003
1004 if (r_1->is_Register()) {
1005 Register r = r_1->as_Register();
1006 if (!r_2->is_valid()) {
1007 __ stw(r, st_off, R1_SP);
1008 st_off-=wordSize;
1009 } else {
1010 // Longs are given 2 64-bit slots in the interpreter, but the
1011 // data is passed in only 1 slot.
1012 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
1013 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
1014 st_off-=wordSize;
1015 }
1016 __ std(r, st_off, R1_SP);
1017 st_off-=wordSize;
1018 }
1019 } else {
1020 assert(r_1->is_FloatRegister(), "");
1021 FloatRegister f = r_1->as_FloatRegister();
1022 if (!r_2->is_valid()) {
1023 __ stfs(f, st_off, R1_SP);
1024 st_off-=wordSize;
1025 } else {
1026 // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
1027 // data is passed in only 1 slot.
1028 // One of these should get known junk...
1029 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
1030 st_off-=wordSize;
1031 __ stfd(f, st_off, R1_SP);
1032 st_off-=wordSize;
1033 }
1034 }
1035 }
1036
1037 // Jump to the interpreter just as if interpreter was doing it.
1038
1039 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
1040
1041 // load TOS
1042 __ addi(R15_esp, R1_SP, st_off);
1043
1044 // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
1045 assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
1046 __ bctr();
1047
1048 return c2i_entrypoint;
1049 }
1050
1051 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1052 int total_args_passed,
1053 int comp_args_on_stack,
1054 const BasicType *sig_bt,
1055 const VMRegPair *regs) {
1056
1057 // Load method's entry-point from method.
1058 __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
1059 __ mtctr(R12_scratch2);
1060
1061 // We will only enter here from an interpreted frame and never from after
1062 // passing thru a c2i. Azul allowed this but we do not. If we lose the
1063 // race and use a c2i we will remain interpreted for the race loser(s).
1064 // This removes all sorts of headaches on the x86 side and also eliminates
1065 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
1066
1067 // Note: r13 contains the senderSP on entry. We must preserve it since
1068 // we may do a i2c -> c2i transition if we lose a race where compiled
1069 // code goes non-entrant while we get args ready.
1070 // In addition we use r13 to locate all the interpreter args as
1071 // we must align the stack to 16 bytes on an i2c entry else we
1072 // lose alignment we expect in all compiled code and register
1073 // save code can segv when fxsave instructions find improperly
1074 // aligned stack pointer.
1075
1076 const Register ld_ptr = R15_esp;
1077 const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
1078 const int num_value_regs = sizeof(value_regs) / sizeof(Register);
1079 int value_regs_index = 0;
1080
1081 int ld_offset = total_args_passed*wordSize;
1082
1083 // Cut-out for having no stack args. Since up to 2 int/oop args are passed
1084 // in registers, we will occasionally have no stack args.
1085 int comp_words_on_stack = 0;
1086 if (comp_args_on_stack) {
1087 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
1088 // registers are below. By subtracting stack0, we either get a negative
1089 // number (all values in registers) or the maximum stack slot accessed.
1090
1091 // Convert 4-byte c2 stack slots to words.
1092 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1093 // Round up to miminum stack alignment, in wordSize.
1094 comp_words_on_stack = align_up(comp_words_on_stack, 2);
1095 __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
1096 }
1097
1098 // Now generate the shuffle code. Pick up all register args and move the
1099 // rest through register value=Z_R12.
1100 BLOCK_COMMENT("Shuffle arguments");
1101 for (int i = 0; i < total_args_passed; i++) {
1102 if (sig_bt[i] == T_VOID) {
1103 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1104 continue;
1105 }
1106
1107 // Pick up 0, 1 or 2 words from ld_ptr.
1108 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1109 "scrambled load targets?");
1110 VMReg r_1 = regs[i].first();
1111 VMReg r_2 = regs[i].second();
1112 if (!r_1->is_valid()) {
1113 assert(!r_2->is_valid(), "");
1114 continue;
1115 }
1116 if (r_1->is_FloatRegister()) {
1117 if (!r_2->is_valid()) {
1118 __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
1119 ld_offset-=wordSize;
1120 } else {
1121 // Skip the unused interpreter slot.
1122 __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
1123 ld_offset-=2*wordSize;
1124 }
1125 } else {
1126 Register r;
1127 if (r_1->is_stack()) {
1128 // Must do a memory to memory move thru "value".
1129 r = value_regs[value_regs_index];
1130 value_regs_index = (value_regs_index + 1) % num_value_regs;
1131 } else {
1132 r = r_1->as_Register();
1133 }
1134 if (!r_2->is_valid()) {
1135 // Not sure we need to do this but it shouldn't hurt.
1136 if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
1137 __ ld(r, ld_offset, ld_ptr);
1138 ld_offset-=wordSize;
1139 } else {
1140 __ lwz(r, ld_offset, ld_ptr);
1141 ld_offset-=wordSize;
1142 }
1143 } else {
1144 // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
1145 // data is passed in only 1 slot.
1146 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
1147 ld_offset-=wordSize;
1148 }
1149 __ ld(r, ld_offset, ld_ptr);
1150 ld_offset-=wordSize;
1151 }
1152
1153 if (r_1->is_stack()) {
1154 // Now store value where the compiler expects it
1155 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;
1156
1157 if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT ||sig_bt[i] == T_BOOLEAN ||
1158 sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
1159 __ stw(r, st_off, R1_SP);
1160 } else {
1161 __ std(r, st_off, R1_SP);
1162 }
1163 }
1164 }
1165 }
1166
1167 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1168
1169 BLOCK_COMMENT("Store method");
1170 // Store method into thread->callee_target.
1171 // We might end up in handle_wrong_method if the callee is
1172 // deoptimized as we race thru here. If that happens we don't want
1173 // to take a safepoint because the caller frame will look
1174 // interpreted and arguments are now "compiled" so it is much better
1175 // to make this transition invisible to the stack walking
1176 // code. Unfortunately if we try and find the callee by normal means
1177 // a safepoint is possible. So we stash the desired callee in the
1178 // thread and the vm will find there should this case occur.
1179 __ std(R19_method, thread_(callee_target));
1180
1181 // Jump to the compiled code just as if compiled code was doing it.
1182 __ bctr();
1183 }
1184
1185 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1186 int total_args_passed,
1187 int comp_args_on_stack,
1188 const BasicType *sig_bt,
1189 const VMRegPair *regs,
1190 address entry_address[AdapterBlob::ENTRY_COUNT]) {
1191 // entry: i2c
1192
1193 __ align(CodeEntryAlignment);
1194 entry_address[AdapterBlob::I2C] = __ pc();
1195 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
1196
1197
1198 // entry: c2i unverified
1199
1200 __ align(CodeEntryAlignment);
1201 BLOCK_COMMENT("c2i unverified entry");
1202 entry_address[AdapterBlob::C2I_Unverified] = __ pc();
1203
1204 // inline_cache contains a CompiledICData
1205 const Register ic = R19_inline_cache_reg;
1206 const Register ic_klass = R11_scratch1;
1207 const Register receiver_klass = R12_scratch2;
1208 const Register code = R21_tmp1;
1209 const Register ientry = R23_tmp3;
1210
1211 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
1212 assert(R11_scratch1 == R11, "need prologue scratch register");
1213
1214 Label call_interpreter;
1215
1216 __ ic_check(4 /* end_alignment */);
1217 __ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
1218 // Argument is valid and klass is as expected, continue.
1219
1220 __ ld(code, method_(code));
1221 __ cmpdi(CR0, code, 0);
1222 __ ld(ientry, method_(interpreter_entry)); // preloaded
1223 __ beq_predict_taken(CR0, call_interpreter);
1224
1225 // Branch to ic_miss_stub.
1226 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1227
1228 // entry: c2i
1229
1230 entry_address[AdapterBlob::C2I] = __ pc();
1231
1232 // Class initialization barrier for static methods
1233 entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
1234 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
1235 Label L_skip_barrier;
1236
1237 // Bypass the barrier for non-static methods
1238 __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
1239 __ andi_(R0, R0, JVM_ACC_STATIC);
1240 __ beq(CR0, L_skip_barrier); // non-static
1241
1242 Register klass = R11_scratch1;
1243 __ load_method_holder(klass, R19_method);
1244 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
1245
1246 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
1247 __ mtctr(klass);
1248 __ bctr();
1249
1250 __ bind(L_skip_barrier);
1251 entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
1252
1253 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1254 bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
1255
1256 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
1257 return;
1258 }
1259
1260 // An oop arg. Must pass a handle not the oop itself.
1261 static void object_move(MacroAssembler* masm,
1262 int frame_size_in_slots,
1263 OopMap* oop_map, int oop_handle_offset,
1264 bool is_receiver, int* receiver_offset,
1265 VMRegPair src, VMRegPair dst,
1266 Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1267 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1268 "receiver has already been moved");
1269
1270 // We must pass a handle. First figure out the location we use as a handle.
1271
1272 if (src.first()->is_stack()) {
1273 // stack to stack or reg
1274
1275 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1276 Label skip;
1277 const int oop_slot_in_callers_frame = reg2slot(src.first());
1278
1279 guarantee(!is_receiver, "expecting receiver in register");
1280 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));
1281
1282 __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
1283 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
1284 __ cmpdi(CR0, r_temp_2, 0);
1285 __ bne(CR0, skip);
1286 // Use a null handle if oop is null.
1287 __ li(r_handle, 0);
1288 __ bind(skip);
1289
1290 if (dst.first()->is_stack()) {
1291 // stack to stack
1292 __ std(r_handle, reg2offset(dst.first()), R1_SP);
1293 } else {
1294 // stack to reg
1295 // Nothing to do, r_handle is already the dst register.
1296 }
1297 } else {
1298 // reg to stack or reg
1299 const Register r_oop = src.first()->as_Register();
1300 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1301 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
1302 + oop_handle_offset; // in slots
1303 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
1304 Label skip;
1305
1306 if (is_receiver) {
1307 *receiver_offset = oop_offset;
1308 }
1309 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));
1310
1311 __ std( r_oop, oop_offset, R1_SP);
1312 __ addi(r_handle, R1_SP, oop_offset);
1313
1314 __ cmpdi(CR0, r_oop, 0);
1315 __ bne(CR0, skip);
1316 // Use a null handle if oop is null.
1317 __ li(r_handle, 0);
1318 __ bind(skip);
1319
1320 if (dst.first()->is_stack()) {
1321 // reg to stack
1322 __ std(r_handle, reg2offset(dst.first()), R1_SP);
1323 } else {
1324 // reg to reg
1325 // Nothing to do, r_handle is already the dst register.
1326 }
1327 }
1328 }
1329
1330 static void int_move(MacroAssembler*masm,
1331 VMRegPair src, VMRegPair dst,
1332 Register r_caller_sp, Register r_temp) {
1333 assert(src.first()->is_valid(), "incoming must be int");
1334 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
1335
1336 if (src.first()->is_stack()) {
1337 if (dst.first()->is_stack()) {
1338 // stack to stack
1339 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
1340 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1341 } else {
1342 // stack to reg
1343 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
1344 }
1345 } else if (dst.first()->is_stack()) {
1346 // reg to stack
1347 __ extsw(r_temp, src.first()->as_Register());
1348 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1349 } else {
1350 // reg to reg
1351 __ extsw(dst.first()->as_Register(), src.first()->as_Register());
1352 }
1353 }
1354
1355 static void long_move(MacroAssembler*masm,
1356 VMRegPair src, VMRegPair dst,
1357 Register r_caller_sp, Register r_temp) {
1358 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
1359 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
1360
1361 if (src.first()->is_stack()) {
1362 if (dst.first()->is_stack()) {
1363 // stack to stack
1364 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
1365 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1366 } else {
1367 // stack to reg
1368 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
1369 }
1370 } else if (dst.first()->is_stack()) {
1371 // reg to stack
1372 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
1373 } else {
1374 // reg to reg
1375 if (dst.first()->as_Register() != src.first()->as_Register())
1376 __ mr(dst.first()->as_Register(), src.first()->as_Register());
1377 }
1378 }
1379
1380 static void float_move(MacroAssembler*masm,
1381 VMRegPair src, VMRegPair dst,
1382 Register r_caller_sp, Register r_temp) {
1383 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
1384 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");
1385
1386 if (src.first()->is_stack()) {
1387 if (dst.first()->is_stack()) {
1388 // stack to stack
1389 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
1390 __ stw(r_temp, reg2offset(dst.first()), R1_SP);
1391 } else {
1392 // stack to reg
1393 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
1394 }
1395 } else if (dst.first()->is_stack()) {
1396 // reg to stack
1397 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
1398 } else {
1399 // reg to reg
1400 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
1401 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1402 }
1403 }
1404
1405 static void double_move(MacroAssembler*masm,
1406 VMRegPair src, VMRegPair dst,
1407 Register r_caller_sp, Register r_temp) {
1408 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
1409 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");
1410
1411 if (src.first()->is_stack()) {
1412 if (dst.first()->is_stack()) {
1413 // stack to stack
1414 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
1415 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1416 } else {
1417 // stack to reg
1418 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
1419 }
1420 } else if (dst.first()->is_stack()) {
1421 // reg to stack
1422 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
1423 } else {
1424 // reg to reg
1425 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
1426 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1427 }
1428 }
1429
1430 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1431 switch (ret_type) {
1432 case T_BOOLEAN:
1433 case T_CHAR:
1434 case T_BYTE:
1435 case T_SHORT:
1436 case T_INT:
1437 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1438 break;
1439 case T_ARRAY:
1440 case T_OBJECT:
1441 case T_LONG:
1442 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1443 break;
1444 case T_FLOAT:
1445 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1446 break;
1447 case T_DOUBLE:
1448 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1449 break;
1450 case T_VOID:
1451 break;
1452 default:
1453 ShouldNotReachHere();
1454 break;
1455 }
1456 }
1457
1458 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1459 switch (ret_type) {
1460 case T_BOOLEAN:
1461 case T_CHAR:
1462 case T_BYTE:
1463 case T_SHORT:
1464 case T_INT:
1465 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1466 break;
1467 case T_ARRAY:
1468 case T_OBJECT:
1469 case T_LONG:
1470 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1471 break;
1472 case T_FLOAT:
1473 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1474 break;
1475 case T_DOUBLE:
1476 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1477 break;
1478 case T_VOID:
1479 break;
1480 default:
1481 ShouldNotReachHere();
1482 break;
1483 }
1484 }
1485
1486 static void verify_oop_args(MacroAssembler* masm,
1487 const methodHandle& method,
1488 const BasicType* sig_bt,
1489 const VMRegPair* regs) {
1490 Register temp_reg = R19_method; // not part of any compiled calling seq
1491 if (VerifyOops) {
1492 for (int i = 0; i < method->size_of_parameters(); i++) {
1493 if (is_reference_type(sig_bt[i])) {
1494 VMReg r = regs[i].first();
1495 assert(r->is_valid(), "bad oop arg");
1496 if (r->is_stack()) {
1497 __ ld(temp_reg, reg2offset(r), R1_SP);
1498 __ verify_oop(temp_reg, FILE_AND_LINE);
1499 } else {
1500 __ verify_oop(r->as_Register(), FILE_AND_LINE);
1501 }
1502 }
1503 }
1504 }
1505 }
1506
1507 static void gen_special_dispatch(MacroAssembler* masm,
1508 const methodHandle& method,
1509 const BasicType* sig_bt,
1510 const VMRegPair* regs) {
1511 verify_oop_args(masm, method, sig_bt, regs);
1512 vmIntrinsics::ID iid = method->intrinsic_id();
1513
1514 // Now write the args into the outgoing interpreter space
1515 bool has_receiver = false;
1516 Register receiver_reg = noreg;
1517 int member_arg_pos = -1;
1518 Register member_reg = noreg;
1519 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1520 if (ref_kind != 0) {
1521 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1522 member_reg = R19_method; // known to be free at this point
1523 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1524 } else if (iid == vmIntrinsics::_invokeBasic) {
1525 has_receiver = true;
1526 } else if (iid == vmIntrinsics::_linkToNative) {
1527 member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
1528 member_reg = R19_method; // known to be free at this point
1529 } else {
1530 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1531 }
1532
1533 if (member_reg != noreg) {
1534 // Load the member_arg into register, if necessary.
1535 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1536 VMReg r = regs[member_arg_pos].first();
1537 if (r->is_stack()) {
1538 __ ld(member_reg, reg2offset(r), R1_SP);
1539 } else {
1540 // no data motion is needed
1541 member_reg = r->as_Register();
1542 }
1543 }
1544
1545 if (has_receiver) {
1546 // Make sure the receiver is loaded into a register.
1547 assert(method->size_of_parameters() > 0, "oob");
1548 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1549 VMReg r = regs[0].first();
1550 assert(r->is_valid(), "bad receiver arg");
1551 if (r->is_stack()) {
1552 // Porting note: This assumes that compiled calling conventions always
1553 // pass the receiver oop in a register. If this is not true on some
1554 // platform, pick a temp and load the receiver from stack.
1555 fatal("receiver always in a register");
1556 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point?
1557 __ ld(receiver_reg, reg2offset(r), R1_SP);
1558 } else {
1559 // no data motion is needed
1560 receiver_reg = r->as_Register();
1561 }
1562 }
1563
1564 // Figure out which address we are really jumping to:
1565 MethodHandles::generate_method_handle_dispatch(masm, iid,
1566 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1567 }
1568
1569 //---------------------------- continuation_enter_setup ---------------------------
1570 //
1571 // Frame setup.
1572 //
1573 // Arguments:
1574 // None.
1575 //
1576 // Results:
1577 // R1_SP: pointer to blank ContinuationEntry in the pushed frame.
1578 //
1579 // Kills:
1580 // R0, R20
1581 //
1582 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) {
1583 assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1584 assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
1585 assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1586
1587 const int frame_size_in_bytes = (int)ContinuationEntry::size();
1588 assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error");
1589
1590 framesize_words = frame_size_in_bytes / wordSize;
1591
1592 DEBUG_ONLY(__ block_comment("setup {"));
1593 // Save return pc and push entry frame
1594 const Register return_pc = R20;
1595 __ mflr(return_pc);
1596 __ std(return_pc, _abi0(lr), R1_SP); // SP->lr = return_pc
1597 __ push_frame(frame_size_in_bytes , R0); // SP -= frame_size_in_bytes
1598
1599 OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
1600
1601 __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread);
1602 __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1603 __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP);
1604 DEBUG_ONLY(__ block_comment("} setup"));
1605
1606 return map;
1607 }
1608
1609 //---------------------------- fill_continuation_entry ---------------------------
1610 //
1611 // Initialize the new ContinuationEntry.
1612 //
1613 // Arguments:
1614 // R1_SP: pointer to blank Continuation entry
1615 // reg_cont_obj: pointer to the continuation
1616 // reg_flags: flags
1617 //
1618 // Results:
1619 // R1_SP: pointer to filled out ContinuationEntry
1620 //
1621 // Kills:
1622 // R8_ARG6, R9_ARG7, R10_ARG8
1623 //
1624 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) {
1625 assert_different_registers(reg_cont_obj, reg_flags);
1626 Register zero = R8_ARG6;
1627 Register tmp2 = R9_ARG7;
1628
1629 DEBUG_ONLY(__ block_comment("fill {"));
1630 #ifdef ASSERT
1631 __ load_const_optimized(tmp2, ContinuationEntry::cookie_value());
1632 __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP);
1633 #endif //ASSERT
1634
1635 __ li(zero, 0);
1636 __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP);
1637 __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
1638 __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP);
1639 __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP);
1640 __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP);
1641
1642 __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread);
1643 __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
1644
1645 __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread);
1646 DEBUG_ONLY(__ block_comment("} fill"));
1647 }
1648
1649 //---------------------------- continuation_enter_cleanup ---------------------------
1650 //
1651 // Copy corresponding attributes from the top ContinuationEntry to the JavaThread
1652 // before deleting it.
1653 //
1654 // Arguments:
1655 // R1_SP: pointer to the ContinuationEntry
1656 //
1657 // Results:
1658 // None.
1659 //
1660 // Kills:
1661 // R8_ARG6, R9_ARG7, R10_ARG8, R15_esp
1662 //
1663 static void continuation_enter_cleanup(MacroAssembler* masm) {
1664 Register tmp1 = R8_ARG6;
1665 Register tmp2 = R9_ARG7;
1666
1667 #ifdef ASSERT
1668 __ block_comment("clean {");
1669 __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
1670 __ cmpd(CR0, R1_SP, tmp1);
1671 __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
1672 #endif
1673
1674 __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
1675 __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);
1676 __ ld_ptr(tmp2, ContinuationEntry::parent_offset(), R1_SP);
1677 __ st_ptr(tmp2, JavaThread::cont_entry_offset(), R16_thread);
1678 DEBUG_ONLY(__ block_comment("} clean"));
1679 }
1680
1681 static void check_continuation_enter_argument(VMReg actual_vmreg,
1682 Register expected_reg,
1683 const char* name) {
1684 assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name);
1685 assert(actual_vmreg->as_Register() == expected_reg,
1686 "%s is in unexpected register: %s instead of %s",
1687 name, actual_vmreg->as_Register()->name(), expected_reg->name());
1688 }
1689
1690 static void gen_continuation_enter(MacroAssembler* masm,
1691 const VMRegPair* regs,
1692 int& exception_offset,
1693 OopMapSet* oop_maps,
1694 int& frame_complete,
1695 int& framesize_words,
1696 int& interpreted_entry_offset,
1697 int& compiled_entry_offset) {
1698
1699 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1700 int pos_cont_obj = 0;
1701 int pos_is_cont = 1;
1702 int pos_is_virtual = 2;
1703
1704 // The platform-specific calling convention may present the arguments in various registers.
1705 // To simplify the rest of the code, we expect the arguments to reside at these known
1706 // registers, and we additionally check the placement here in case calling convention ever
1707 // changes.
1708 Register reg_cont_obj = R3_ARG1;
1709 Register reg_is_cont = R4_ARG2;
1710 Register reg_is_virtual = R5_ARG3;
1711
1712 check_continuation_enter_argument(regs[pos_cont_obj].first(), reg_cont_obj, "Continuation object");
1713 check_continuation_enter_argument(regs[pos_is_cont].first(), reg_is_cont, "isContinue");
1714 check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread");
1715
1716 address resolve_static_call = SharedRuntime::get_resolve_static_call_stub();
1717
1718 address start = __ pc();
1719
1720 Label L_thaw, L_exit;
1721
1722 // i2i entry used at interp_only_mode only
1723 interpreted_entry_offset = __ pc() - start;
1724 {
1725 #ifdef ASSERT
1726 Label is_interp_only;
1727 __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
1728 __ cmpwi(CR0, R0, 0);
1729 __ bne(CR0, is_interp_only);
1730 __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1731 __ bind(is_interp_only);
1732 #endif
1733
1734 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1735 __ ld(reg_cont_obj, Interpreter::stackElementSize*3, R15_esp);
1736 __ lwz(reg_is_cont, Interpreter::stackElementSize*2, R15_esp);
1737 __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp);
1738
1739 __ push_cont_fastpath();
1740
1741 OopMap* map = continuation_enter_setup(masm, framesize_words);
1742
1743 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
1744 // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.
1745
1746 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1747
1748 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1749 __ cmpwi(CR0, reg_is_cont, 0);
1750 __ bne(CR0, L_thaw);
1751
1752 // --- call Continuation.enter(Continuation c, boolean isContinue)
1753
1754 // Emit compiled static call. The call will be always resolved to the c2i
1755 // entry of Continuation.enter(Continuation c, boolean isContinue).
1756 // There are special cases in SharedRuntime::resolve_static_call_C() and
1757 // SharedRuntime::resolve_sub_helper_internal() to achieve this
1758 // See also corresponding call below.
1759 address c2i_call_pc = __ pc();
1760 int start_offset = __ offset();
1761 // Put the entry point as a constant into the constant pool.
1762 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
1763 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
1764 guarantee(entry_point_toc_addr != nullptr, "const section overflow");
1765
1766 // Emit the trampoline stub which will be related to the branch-and-link below.
1767 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
1768 guarantee(stub != nullptr, "no space for trampoline stub");
1769
1770 __ relocate(relocInfo::static_call_type);
1771 // Note: At this point we do not have the address of the trampoline
1772 // stub, and the entry point might be too far away for bl, so __ pc()
1773 // serves as dummy and the bl will be patched later.
1774 __ bl(__ pc());
1775 oop_maps->add_gc_map(__ pc() - start, map);
1776 __ post_call_nop();
1777
1778 __ b(L_exit);
1779
1780 // static stub for the call above
1781 stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc);
1782 guarantee(stub != nullptr, "no space for static stub");
1783 }
1784
1785 // compiled entry
1786 __ align(CodeEntryAlignment);
1787 compiled_entry_offset = __ pc() - start;
1788
1789 OopMap* map = continuation_enter_setup(masm, framesize_words);
1790
1791 // Frame is now completed as far as size and linkage.
1792 frame_complete =__ pc() - start;
1793
1794 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1795
1796 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1797 __ cmpwi(CR0, reg_is_cont, 0);
1798 __ bne(CR0, L_thaw);
1799
1800 // --- call Continuation.enter(Continuation c, boolean isContinue)
1801
1802 // Emit compiled static call
1803 // The call needs to be resolved. There's a special case for this in
1804 // SharedRuntime::find_callee_info_helper() which calls
1805 // LinkResolver::resolve_continuation_enter() which resolves the call to
1806 // Continuation.enter(Continuation c, boolean isContinue).
1807 address call_pc = __ pc();
1808 int start_offset = __ offset();
1809 // Put the entry point as a constant into the constant pool.
1810 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
1811 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
1812 guarantee(entry_point_toc_addr != nullptr, "const section overflow");
1813
1814 // Emit the trampoline stub which will be related to the branch-and-link below.
1815 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
1816 guarantee(stub != nullptr, "no space for trampoline stub");
1817
1818 __ relocate(relocInfo::static_call_type);
1819 // Note: At this point we do not have the address of the trampoline
1820 // stub, and the entry point might be too far away for bl, so __ pc()
1821 // serves as dummy and the bl will be patched later.
1822 __ bl(__ pc());
1823 oop_maps->add_gc_map(__ pc() - start, map);
1824 __ post_call_nop();
1825
1826 __ b(L_exit);
1827
1828 // --- Thawing path
1829
1830 __ bind(L_thaw);
1831 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1832 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw()));
1833 __ mtctr(R0);
1834 __ bctrl();
1835 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1836 ContinuationEntry::_return_pc_offset = __ pc() - start;
1837 __ post_call_nop();
1838
1839 // --- Normal exit (resolve/thawing)
1840
1841 __ bind(L_exit);
1842 ContinuationEntry::_cleanup_offset = __ pc() - start;
1843 continuation_enter_cleanup(masm);
1844
1845 // Pop frame and return
1846 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP));
1847 __ addi(R1_SP, R1_SP, framesize_words*wordSize);
1848 DEBUG_ONLY(__ cmpd(CR0, R0, R1_SP));
1849 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size");
1850 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1851 __ mtlr(R0);
1852 __ blr();
1853
1854 // --- Exception handling path
1855
1856 exception_offset = __ pc() - start;
1857
1858 continuation_enter_cleanup(masm);
1859 Register ex_pc = R17_tos; // nonvolatile register
1860 Register ex_oop = R15_esp; // nonvolatile register
1861 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc
1862 __ ld(ex_pc, _abi0(lr), ex_pc);
1863 __ mr(ex_oop, R3_RET); // save return value containing the exception oop
1864 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc);
1865 __ mtlr(R3_RET); // the exception handler
1866 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame
1867
1868 // Continue at exception handler
1869 // See OptoRuntime::generate_exception_blob for register arguments
1870 __ mr(R3_ARG1, ex_oop); // pass exception oop
1871 __ mr(R4_ARG2, ex_pc); // pass exception pc
1872 __ blr();
1873
1874 // static stub for the call above
1875 stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc);
1876 guarantee(stub != nullptr, "no space for static stub");
1877 }
1878
1879 static void gen_continuation_yield(MacroAssembler* masm,
1880 const VMRegPair* regs,
1881 OopMapSet* oop_maps,
1882 int& frame_complete,
1883 int& framesize_words,
1884 int& compiled_entry_offset) {
1885 Register tmp = R10_ARG8;
1886
1887 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes);
1888 framesize_words = framesize_bytes / wordSize;
1889
1890 address start = __ pc();
1891 compiled_entry_offset = __ pc() - start;
1892
1893 // Save return pc and push entry frame
1894 __ mflr(tmp);
1895 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc
1896 __ push_frame(framesize_bytes , R0); // SP -= frame_size_in_bytes
1897
1898 DEBUG_ONLY(__ block_comment("Frame Complete"));
1899 frame_complete = __ pc() - start;
1900 address last_java_pc = __ pc();
1901
1902 // This nop must be exactly at the PC we push into the frame info.
1903 // We use this nop for fast CodeBlob lookup, associate the OopMap
1904 // with it right away.
1905 __ post_call_nop();
1906 OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1);
1907 oop_maps->add_gc_map(last_java_pc - start, map);
1908
1909 __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated
1910 __ set_last_Java_frame(R1_SP, tmp);
1911 __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP);
1912 __ reset_last_Java_frame();
1913
1914 Label L_pinned;
1915
1916 __ cmpwi(CR0, R3_RET, 0);
1917 __ bne(CR0, L_pinned);
1918
1919 // yield succeeded
1920
1921 // Pop frames of continuation including this stub's frame
1922 __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1923 // The frame pushed by gen_continuation_enter is on top now again
1924 continuation_enter_cleanup(masm);
1925
1926 // Pop frame and return
1927 Label L_return;
1928 __ bind(L_return);
1929 __ pop_frame();
1930 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1931 __ mtlr(R0);
1932 __ blr();
1933
1934 // yield failed - continuation is pinned
1935
1936 __ bind(L_pinned);
1937
1938 // handle pending exception thrown by freeze
1939 __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
1940 __ cmpdi(CR0, tmp, 0);
1941 __ beq(CR0, L_return); // return if no exception is pending
1942 __ pop_frame();
1943 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1944 __ mtlr(R0);
1945 __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0);
1946 __ mtctr(tmp);
1947 __ bctr();
1948 }
1949
1950 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1951 ::continuation_enter_cleanup(masm);
1952 }
1953
1954 // ---------------------------------------------------------------------------
1955 // Generate a native wrapper for a given method. The method takes arguments
1956 // in the Java compiled code convention, marshals them to the native
1957 // convention (handlizes oops, etc), transitions to native, makes the call,
1958 // returns to java state (possibly blocking), unhandlizes any result and
1959 // returns.
1960 //
1961 // Critical native functions are a shorthand for the use of
1962 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1963 // functions. The wrapper is expected to unpack the arguments before
1964 // passing them to the callee. Critical native functions leave the state _in_Java,
1965 // since they cannot stop for GC.
1966 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1967 // block and the check for pending exceptions it's impossible for them
1968 // to be thrown.
1969 //
1970 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1971 const methodHandle& method,
1972 int compile_id,
1973 BasicType *in_sig_bt,
1974 VMRegPair *in_regs,
1975 BasicType ret_type) {
1976 if (method->is_continuation_native_intrinsic()) {
1977 int exception_offset = -1;
1978 OopMapSet* oop_maps = new OopMapSet();
1979 int frame_complete = -1;
1980 int stack_slots = -1;
1981 int interpreted_entry_offset = -1;
1982 int vep_offset = -1;
1983 if (method->is_continuation_enter_intrinsic()) {
1984 gen_continuation_enter(masm,
1985 in_regs,
1986 exception_offset,
1987 oop_maps,
1988 frame_complete,
1989 stack_slots,
1990 interpreted_entry_offset,
1991 vep_offset);
1992 } else if (method->is_continuation_yield_intrinsic()) {
1993 gen_continuation_yield(masm,
1994 in_regs,
1995 oop_maps,
1996 frame_complete,
1997 stack_slots,
1998 vep_offset);
1999 } else {
2000 guarantee(false, "Unknown Continuation native intrinsic");
2001 }
2002
2003 #ifdef ASSERT
2004 if (method->is_continuation_enter_intrinsic()) {
2005 assert(interpreted_entry_offset != -1, "Must be set");
2006 assert(exception_offset != -1, "Must be set");
2007 } else {
2008 assert(interpreted_entry_offset == -1, "Must be unset");
2009 assert(exception_offset == -1, "Must be unset");
2010 }
2011 assert(frame_complete != -1, "Must be set");
2012 assert(stack_slots != -1, "Must be set");
2013 assert(vep_offset != -1, "Must be set");
2014 #endif
2015
2016 __ flush();
2017 nmethod* nm = nmethod::new_native_nmethod(method,
2018 compile_id,
2019 masm->code(),
2020 vep_offset,
2021 frame_complete,
2022 stack_slots,
2023 in_ByteSize(-1),
2024 in_ByteSize(-1),
2025 oop_maps,
2026 exception_offset);
2027 if (nm == nullptr) return nm;
2028 if (method->is_continuation_enter_intrinsic()) {
2029 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
2030 } else if (method->is_continuation_yield_intrinsic()) {
2031 _cont_doYield_stub = nm;
2032 }
2033 return nm;
2034 }
2035
2036 if (method->is_method_handle_intrinsic()) {
2037 vmIntrinsics::ID iid = method->intrinsic_id();
2038 intptr_t start = (intptr_t)__ pc();
2039 int vep_offset = ((intptr_t)__ pc()) - start;
2040 gen_special_dispatch(masm,
2041 method,
2042 in_sig_bt,
2043 in_regs);
2044 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
2045 __ flush();
2046 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
2047 return nmethod::new_native_nmethod(method,
2048 compile_id,
2049 masm->code(),
2050 vep_offset,
2051 frame_complete,
2052 stack_slots / VMRegImpl::slots_per_word,
2053 in_ByteSize(-1),
2054 in_ByteSize(-1),
2055 (OopMapSet*)nullptr);
2056 }
2057
2058 address native_func = method->native_function();
2059 assert(native_func != nullptr, "must have function");
2060
2061 // First, create signature for outgoing C call
2062 // --------------------------------------------------------------------------
2063
2064 int total_in_args = method->size_of_parameters();
2065 // We have received a description of where all the java args are located
2066 // on entry to the wrapper. We need to convert these args to where
2067 // the jni function will expect them. To figure out where they go
2068 // we convert the java signature to a C signature by inserting
2069 // the hidden arguments as arg[0] and possibly arg[1] (static method)
2070
2071 // Calculate the total number of C arguments and create arrays for the
2072 // signature and the outgoing registers.
2073 // On ppc64, we have two arrays for the outgoing registers, because
2074 // some floating-point arguments must be passed in registers _and_
2075 // in stack locations.
2076 bool method_is_static = method->is_static();
2077 int total_c_args = total_in_args + (method_is_static ? 2 : 1);
2078
2079 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2080 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2081
2082 // Create the signature for the C call:
2083 // 1) add the JNIEnv*
2084 // 2) add the class if the method is static
2085 // 3) copy the rest of the incoming signature (shifted by the number of
2086 // hidden arguments).
2087
2088 int argc = 0;
2089 out_sig_bt[argc++] = T_ADDRESS;
2090 if (method->is_static()) {
2091 out_sig_bt[argc++] = T_OBJECT;
2092 }
2093
2094 for (int i = 0; i < total_in_args ; i++ ) {
2095 out_sig_bt[argc++] = in_sig_bt[i];
2096 }
2097
2098
2099 // Compute the wrapper's frame size.
2100 // --------------------------------------------------------------------------
2101
2102 // Now figure out where the args must be stored and how much stack space
2103 // they require.
2104 //
2105 // Compute framesize for the wrapper. We need to handlize all oops in
2106 // incoming registers.
2107 //
2108 // Calculate the total number of stack slots we will need:
2109 // 1) abi requirements
2110 // 2) outgoing arguments
2111 // 3) space for inbound oop handle area
2112 // 4) space for handlizing a klass if static method
2113 // 5) space for a lock if synchronized method
2114 // 6) workspace for saving return values, int <-> float reg moves, etc.
2115 // 7) alignment
2116 //
2117 // Layout of the native wrapper frame:
2118 // (stack grows upwards, memory grows downwards)
2119 //
2120 // NW [ABI_REG_ARGS] <-- 1) R1_SP
2121 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
2122 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset
2123 // klass <-- 4) R1_SP + klass_offset
2124 // lock <-- 5) R1_SP + lock_offset
2125 // [workspace] <-- 6) R1_SP + workspace_offset
2126 // [alignment] (optional) <-- 7)
2127 // caller [JIT_TOP_ABI_48] <-- r_callers_sp
2128 //
2129 // - *_slot_offset Indicates offset from SP in number of stack slots.
2130 // - *_offset Indicates offset from SP in bytes.
2131
2132 int stack_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args) + // 1+2)
2133 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.
2134
2135 // Now the space for the inbound oop handle area.
2136 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
2137
2138 int oop_handle_slot_offset = stack_slots;
2139 stack_slots += total_save_slots; // 3)
2140
2141 int klass_slot_offset = 0;
2142 int klass_offset = -1;
2143 if (method_is_static) { // 4)
2144 klass_slot_offset = stack_slots;
2145 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2146 stack_slots += VMRegImpl::slots_per_word;
2147 }
2148
2149 int lock_slot_offset = 0;
2150 int lock_offset = -1;
2151 if (method->is_synchronized()) { // 5)
2152 lock_slot_offset = stack_slots;
2153 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size;
2154 stack_slots += VMRegImpl::slots_per_word;
2155 }
2156
2157 int workspace_slot_offset = stack_slots; // 6)
2158 stack_slots += 2;
2159
2160 // Now compute actual number of stack words we need.
2161 // Rounding to make stack properly aligned.
2162 stack_slots = align_up(stack_slots, // 7)
2163 frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
2164 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
2165
2166
2167 // Now we can start generating code.
2168 // --------------------------------------------------------------------------
2169
2170 intptr_t start_pc = (intptr_t)__ pc();
2171 intptr_t vep_start_pc;
2172 intptr_t frame_done_pc;
2173
2174 Label handle_pending_exception;
2175 Label last_java_pc;
2176
2177 Register r_callers_sp = R21;
2178 Register r_temp_1 = R22;
2179 Register r_temp_2 = R23;
2180 Register r_temp_3 = R24;
2181 Register r_temp_4 = R25;
2182 Register r_temp_5 = R26;
2183 Register r_temp_6 = R27;
2184 Register r_last_java_pc = R28;
2185
2186 Register r_carg1_jnienv = noreg;
2187 Register r_carg2_classorobject = noreg;
2188 r_carg1_jnienv = out_regs[0].first()->as_Register();
2189 r_carg2_classorobject = out_regs[1].first()->as_Register();
2190
2191
2192 // Generate the Unverified Entry Point (UEP).
2193 // --------------------------------------------------------------------------
2194 assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2195
2196 // Check ic: object class == cached class?
2197 if (!method_is_static) {
2198 __ ic_check(4 /* end_alignment */);
2199 }
2200
2201 // Generate the Verified Entry Point (VEP).
2202 // --------------------------------------------------------------------------
2203 vep_start_pc = (intptr_t)__ pc();
2204
2205 if (method->needs_clinit_barrier()) {
2206 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
2207 Label L_skip_barrier;
2208 Register klass = r_temp_1;
2209 // Notify OOP recorder (don't need the relocation)
2210 AddressLiteral md = __ constant_metadata_address(method->method_holder());
2211 __ load_const_optimized(klass, md.value(), R0);
2212 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
2213
2214 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
2215 __ mtctr(klass);
2216 __ bctr();
2217
2218 __ bind(L_skip_barrier);
2219 }
2220
2221 __ save_LR(r_temp_1);
2222 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2223 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
2224 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
2225
2226 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
2227 bs->nmethod_entry_barrier(masm, r_temp_1);
2228
2229 frame_done_pc = (intptr_t)__ pc();
2230
2231 // Native nmethod wrappers never take possession of the oop arguments.
2232 // So the caller will gc the arguments.
2233 // The only thing we need an oopMap for is if the call is static.
2234 //
2235 // An OopMap for lock (and class if static), and one for the VM call itself.
2236 OopMapSet *oop_maps = new OopMapSet();
2237 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2238
2239 // Move arguments from register/stack to register/stack.
2240 // --------------------------------------------------------------------------
2241 //
2242 // We immediately shuffle the arguments so that for any vm call we have
2243 // to make from here on out (sync slow path, jvmti, etc.) we will have
2244 // captured the oops from our caller and have a valid oopMap for them.
2245 //
2246 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2247 // (derived from JavaThread* which is in R16_thread) and, if static,
2248 // the class mirror instead of a receiver. This pretty much guarantees that
2249 // register layout will not match. We ignore these extra arguments during
2250 // the shuffle. The shuffle is described by the two calling convention
2251 // vectors we have in our possession. We simply walk the java vector to
2252 // get the source locations and the c vector to get the destinations.
2253
2254 // Record sp-based slot for receiver on stack for non-static methods.
2255 int receiver_offset = -1;
2256
2257 // We move the arguments backward because the floating point registers
2258 // destination will always be to a register with a greater or equal
2259 // register number or the stack.
2260 // in is the index of the incoming Java arguments
2261 // out is the index of the outgoing C arguments
2262
2263 #ifdef ASSERT
2264 bool reg_destroyed[Register::number_of_registers];
2265 bool freg_destroyed[FloatRegister::number_of_registers];
2266 for (int r = 0 ; r < Register::number_of_registers ; r++) {
2267 reg_destroyed[r] = false;
2268 }
2269 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) {
2270 freg_destroyed[f] = false;
2271 }
2272 #endif // ASSERT
2273
2274 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
2275
2276 #ifdef ASSERT
2277 if (in_regs[in].first()->is_Register()) {
2278 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
2279 } else if (in_regs[in].first()->is_FloatRegister()) {
2280 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
2281 }
2282 if (out_regs[out].first()->is_Register()) {
2283 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
2284 } else if (out_regs[out].first()->is_FloatRegister()) {
2285 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
2286 }
2287 #endif // ASSERT
2288
2289 switch (in_sig_bt[in]) {
2290 case T_BOOLEAN:
2291 case T_CHAR:
2292 case T_BYTE:
2293 case T_SHORT:
2294 case T_INT:
2295 // Move int and do sign extension.
2296 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2297 break;
2298 case T_LONG:
2299 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2300 break;
2301 case T_ARRAY:
2302 case T_OBJECT:
2303 object_move(masm, stack_slots,
2304 oop_map, oop_handle_slot_offset,
2305 ((in == 0) && (!method_is_static)), &receiver_offset,
2306 in_regs[in], out_regs[out],
2307 r_callers_sp, r_temp_1, r_temp_2);
2308 break;
2309 case T_VOID:
2310 break;
2311 case T_FLOAT:
2312 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2313 break;
2314 case T_DOUBLE:
2315 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2316 break;
2317 case T_ADDRESS:
2318 fatal("found type (T_ADDRESS) in java args");
2319 break;
2320 default:
2321 ShouldNotReachHere();
2322 break;
2323 }
2324 }
2325
2326 // Pre-load a static method's oop into ARG2.
2327 // Used both by locking code and the normal JNI call code.
2328 if (method_is_static) {
2329 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
2330 r_carg2_classorobject);
2331
2332 // Now handlize the static class mirror in carg2. It's known not-null.
2333 __ std(r_carg2_classorobject, klass_offset, R1_SP);
2334 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2335 __ addi(r_carg2_classorobject, R1_SP, klass_offset);
2336 }
2337
2338 // Get JNIEnv* which is first argument to native.
2339 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
2340
2341 // NOTE:
2342 //
2343 // We have all of the arguments setup at this point.
2344 // We MUST NOT touch any outgoing regs from this point on.
2345 // So if we must call out we must push a new frame.
2346
2347 // The last java pc will also be used as resume pc if this is the wrapper for wait0.
2348 // For this purpose the precise location matters but not for oopmap lookup.
2349 __ calculate_address_from_global_toc(r_last_java_pc, last_java_pc, true, true, true, true);
2350
2351 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
2352 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
2353
2354 # if 0
2355 // DTrace method entry
2356 # endif
2357
2358 // Lock a synchronized method.
2359 // --------------------------------------------------------------------------
2360
2361 if (method->is_synchronized()) {
2362 Register r_oop = r_temp_4;
2363 const Register r_box = r_temp_5;
2364 Label done, locked;
2365
2366 // Load the oop for the object or class. r_carg2_classorobject contains
2367 // either the handlized oop from the incoming arguments or the handlized
2368 // class mirror (if the method is static).
2369 __ ld(r_oop, 0, r_carg2_classorobject);
2370
2371 // Get the lock box slot's address.
2372 __ addi(r_box, R1_SP, lock_offset);
2373
2374 // Try fastpath for locking.
2375 // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
2376 Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
2377 __ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
2378 __ beq(CR0, locked);
2379
2380 // None of the above fast optimizations worked so we have to get into the
2381 // slow case of monitor enter. Inline a special case of call_VM that
2382 // disallows any pending_exception.
2383
2384 // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
2385 int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes);
2386 __ mr(R11_scratch1, R1_SP);
2387 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs);
2388
2389 // Do the call.
2390 __ set_last_Java_frame(R11_scratch1, r_last_java_pc);
2391 assert(r_last_java_pc->is_nonvolatile(), "r_last_java_pc needs to be preserved accross complete_monitor_locking_C call");
2392 // The following call will not be preempted.
2393 // push_cont_fastpath forces freeze slow path in case we try to preempt where we will pin the
2394 // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
2395 __ push_cont_fastpath();
2396 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
2397 __ pop_cont_fastpath();
2398 __ reset_last_Java_frame();
2399
2400 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs);
2401
2402 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2403 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C");
2404
2405 __ bind(locked);
2406 }
2407
2408 __ set_last_Java_frame(R1_SP, r_last_java_pc);
2409
2410 // Publish thread state
2411 // --------------------------------------------------------------------------
2412
2413 // Transition from _thread_in_Java to _thread_in_native.
2414 __ li(R0, _thread_in_native);
2415 __ release();
2416 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2417 __ stw(R0, thread_(thread_state));
2418
2419
2420 // The JNI call
2421 // --------------------------------------------------------------------------
2422 __ call_c(native_func, relocInfo::runtime_call_type);
2423
2424
2425 // Now, we are back from the native code.
2426
2427
2428 // Unpack the native result.
2429 // --------------------------------------------------------------------------
2430
2431 // For int-types, we do any needed sign-extension required.
2432 // Care must be taken that the return values (R3_RET and F1_RET)
2433 // will survive any VM calls for blocking or unlocking.
2434 // An OOP result (handle) is done specially in the slow-path code.
2435
2436 switch (ret_type) {
2437 case T_VOID: break; // Nothing to do!
2438 case T_FLOAT: break; // Got it where we want it (unless slow-path).
2439 case T_DOUBLE: break; // Got it where we want it (unless slow-path).
2440 case T_LONG: break; // Got it where we want it (unless slow-path).
2441 case T_OBJECT: break; // Really a handle.
2442 // Cannot de-handlize until after reclaiming jvm_lock.
2443 case T_ARRAY: break;
2444
2445 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1)
2446 __ normalize_bool(R3_RET);
2447 break;
2448 }
2449 case T_BYTE: { // sign extension
2450 __ extsb(R3_RET, R3_RET);
2451 break;
2452 }
2453 case T_CHAR: { // unsigned result
2454 __ andi(R3_RET, R3_RET, 0xffff);
2455 break;
2456 }
2457 case T_SHORT: { // sign extension
2458 __ extsh(R3_RET, R3_RET);
2459 break;
2460 }
2461 case T_INT: // nothing to do
2462 break;
2463 default:
2464 ShouldNotReachHere();
2465 break;
2466 }
2467
2468 // Publish thread state
2469 // --------------------------------------------------------------------------
2470
2471 // Switch thread to "native transition" state before reading the
2472 // synchronization state. This additional state is necessary because reading
2473 // and testing the synchronization state is not atomic w.r.t. GC, as this
2474 // scenario demonstrates:
2475 // - Java thread A, in _thread_in_native state, loads _not_synchronized
2476 // and is preempted.
2477 // - VM thread changes sync state to synchronizing and suspends threads
2478 // for GC.
2479 // - Thread A is resumed to finish this native method, but doesn't block
2480 // here since it didn't see any synchronization in progress, and escapes.
2481
2482 // Transition from _thread_in_native to _thread_in_native_trans.
2483 __ li(R0, _thread_in_native_trans);
2484 __ release();
2485 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2486 __ stw(R0, thread_(thread_state));
2487
2488
2489 // Must we block?
2490 // --------------------------------------------------------------------------
2491
2492 // Block, if necessary, before resuming in _thread_in_Java state.
2493 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2494 {
2495 Label no_block, sync;
2496
2497 // Force this write out before the read below.
2498 if (!UseSystemMemoryBarrier) {
2499 __ fence();
2500 }
2501
2502 Register sync_state_addr = r_temp_4;
2503 Register sync_state = r_temp_5;
2504 Register suspend_flags = r_temp_6;
2505
2506 // No synchronization in progress nor yet synchronized
2507 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
2508 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */);
2509
2510 // Not suspended.
2511 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
2512 __ lwz(suspend_flags, thread_(suspend_flags));
2513 __ cmpwi(CR1, suspend_flags, 0);
2514 __ beq(CR1, no_block);
2515
2516 // Block. Save any potential method result value before the operation and
2517 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2518 // lets us share the oopMap we used when we went native rather than create
2519 // a distinct one for this pc.
2520 __ bind(sync);
2521 __ isync();
2522
2523 address entry_point =
2524 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
2525 save_native_result(masm, ret_type, workspace_slot_offset);
2526 __ call_VM_leaf(entry_point, R16_thread);
2527 restore_native_result(masm, ret_type, workspace_slot_offset);
2528
2529 __ bind(no_block);
2530
2531 // Publish thread state.
2532 // --------------------------------------------------------------------------
2533
2534 // Thread state is thread_in_native_trans. Any safepoint blocking has
2535 // already happened so we can now change state to _thread_in_Java.
2536
2537 // Transition from _thread_in_native_trans to _thread_in_Java.
2538 __ li(R0, _thread_in_Java);
2539 __ lwsync(); // Acquire safepoint and suspend state, release thread state.
2540 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2541 __ stw(R0, thread_(thread_state));
2542
2543 // Check preemption for Object.wait()
2544 if (method->is_object_wait0()) {
2545 Label not_preempted;
2546 __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
2547 __ cmpdi(CR0, R0, 0);
2548 __ beq(CR0, not_preempted);
2549 __ mtlr(R0);
2550 __ li(R0, 0);
2551 __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
2552 __ blr();
2553 __ bind(not_preempted);
2554 }
2555 __ bind(last_java_pc);
2556 // We use the same pc/oopMap repeatedly when we call out above.
2557 intptr_t oopmap_pc = (intptr_t) __ pc();
2558 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
2559 }
2560
2561 // Reguard any pages if necessary.
2562 // --------------------------------------------------------------------------
2563
2564 Label no_reguard;
2565 __ lwz(r_temp_1, thread_(stack_guard_state));
2566 __ cmpwi(CR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
2567 __ bne(CR0, no_reguard);
2568
2569 save_native_result(masm, ret_type, workspace_slot_offset);
2570 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2571 restore_native_result(masm, ret_type, workspace_slot_offset);
2572
2573 __ bind(no_reguard);
2574
2575
2576 // Unlock
2577 // --------------------------------------------------------------------------
2578
2579 if (method->is_synchronized()) {
2580 const Register r_oop = r_temp_4;
2581 const Register r_box = r_temp_5;
2582 const Register r_exception = r_temp_6;
2583 Label done;
2584
2585 // Get oop and address of lock object box.
2586 if (method_is_static) {
2587 assert(klass_offset != -1, "");
2588 __ ld(r_oop, klass_offset, R1_SP);
2589 } else {
2590 assert(receiver_offset != -1, "");
2591 __ ld(r_oop, receiver_offset, R1_SP);
2592 }
2593 __ addi(r_box, R1_SP, lock_offset);
2594
2595 // Try fastpath for unlocking.
2596 __ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2597 __ beq(CR0, done);
2598
2599 // Save and restore any potential method result value around the unlocking operation.
2600 save_native_result(masm, ret_type, workspace_slot_offset);
2601
2602 // Must save pending exception around the slow-path VM call. Since it's a
2603 // leaf call, the pending exception (if any) can be kept in a register.
2604 __ ld(r_exception, thread_(pending_exception));
2605 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2606 __ li(R0, 0);
2607 __ std(R0, thread_(pending_exception));
2608
2609 // Slow case of monitor enter.
2610 // Inline a special case of call_VM that disallows any pending_exception.
2611 // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
2612 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
2613
2614 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2615 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");
2616
2617 restore_native_result(masm, ret_type, workspace_slot_offset);
2618
2619 // Check_forward_pending_exception jump to forward_exception if any pending
2620 // exception is set. The forward_exception routine expects to see the
2621 // exception in pending_exception and not in a register. Kind of clumsy,
2622 // since all folks who branch to forward_exception must have tested
2623 // pending_exception first and hence have it in a register already.
2624 __ std(r_exception, thread_(pending_exception));
2625
2626 __ bind(done);
2627 }
2628
2629 # if 0
2630 // DTrace method exit
2631 # endif
2632
2633 // Clear "last Java frame" SP and PC.
2634 // --------------------------------------------------------------------------
2635
2636 // Last java frame won't be set if we're resuming after preemption
2637 bool maybe_preempted = method->is_object_wait0();
2638 __ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */);
2639
2640 // Unbox oop result, e.g. JNIHandles::resolve value.
2641 // --------------------------------------------------------------------------
2642
2643 if (is_reference_type(ret_type)) {
2644 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE);
2645 }
2646
2647 if (CheckJNICalls) {
2648 // clear_pending_jni_exception_check
2649 __ load_const_optimized(R0, 0L);
2650 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
2651 }
2652
2653 // Reset handle block.
2654 // --------------------------------------------------------------------------
2655 __ ld(r_temp_1, thread_(active_handles));
2656 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
2657 __ li(r_temp_2, 0);
2658 __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);
2659
2660 // Prepare for return
2661 // --------------------------------------------------------------------------
2662 __ pop_frame();
2663 __ restore_LR(R11);
2664
2665 #if INCLUDE_JFR
2666 // We need to do a poll test after unwind in case the sampler
2667 // managed to sample the native frame after returning to Java.
2668 Label L_stub;
2669 int safepoint_offset = __ offset();
2670 if (!UseSIGTRAP) {
2671 __ relocate(relocInfo::poll_return_type);
2672 }
2673 __ safepoint_poll(L_stub, r_temp_2, true /* at_return */, true /* in_nmethod: frame already popped */);
2674 #endif // INCLUDE_JFR
2675
2676 // Check for pending exceptions.
2677 // --------------------------------------------------------------------------
2678 __ ld(r_temp_2, thread_(pending_exception));
2679 __ cmpdi(CR0, r_temp_2, 0);
2680 __ bne(CR0, handle_pending_exception);
2681
2682 // Return.
2683 __ blr();
2684
2685 // Handler for return safepoint (out-of-line).
2686 #if INCLUDE_JFR
2687 if (!UseSIGTRAP) {
2688 __ bind(L_stub);
2689 __ jump_to_polling_page_return_handler_blob(safepoint_offset);
2690 }
2691 #endif // INCLUDE_JFR
2692
2693 // Handler for pending exceptions (out-of-line).
2694 // --------------------------------------------------------------------------
2695 // Since this is a native call, we know the proper exception handler
2696 // is the empty function. We just pop this frame and then jump to
2697 // forward_exception_entry.
2698 __ bind(handle_pending_exception);
2699 __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2700 relocInfo::runtime_call_type);
2701
2702 // Done.
2703 // --------------------------------------------------------------------------
2704
2705 __ flush();
2706
2707 nmethod *nm = nmethod::new_native_nmethod(method,
2708 compile_id,
2709 masm->code(),
2710 vep_start_pc-start_pc,
2711 frame_done_pc-start_pc,
2712 stack_slots / VMRegImpl::slots_per_word,
2713 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2714 in_ByteSize(lock_offset),
2715 oop_maps);
2716
2717 return nm;
2718 }
2719
2720 // This function returns the adjust size (in number of words) to a c2i adapter
2721 // activation for use during deoptimization.
2722 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2723 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words);
2724 }
2725
2726 uint SharedRuntime::in_preserve_stack_slots() {
2727 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size;
2728 }
2729
2730 uint SharedRuntime::out_preserve_stack_slots() {
2731 #if defined(COMPILER1) || defined(COMPILER2)
2732 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2733 #else
2734 return 0;
2735 #endif
2736 }
2737
2738 VMReg SharedRuntime::thread_register() {
2739 // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames).
2740 ShouldNotCallThis();
2741 return nullptr;
2742 }
2743
2744 #if defined(COMPILER1) || defined(COMPILER2)
2745 // Frame generation for deopt and uncommon trap blobs.
2746 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2747 /* Read */
2748 Register unroll_block_reg,
2749 /* Update */
2750 Register frame_sizes_reg,
2751 Register number_of_frames_reg,
2752 Register pcs_reg,
2753 /* Invalidate */
2754 Register frame_size_reg,
2755 Register pc_reg) {
2756
2757 __ ld(pc_reg, 0, pcs_reg);
2758 __ ld(frame_size_reg, 0, frame_sizes_reg);
2759 __ std(pc_reg, _abi0(lr), R1_SP);
2760 __ push_frame(frame_size_reg, R0/*tmp*/);
2761 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
2762 __ addi(number_of_frames_reg, number_of_frames_reg, -1);
2763 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
2764 __ addi(pcs_reg, pcs_reg, wordSize);
2765 }
2766
2767 // Loop through the UnrollBlock info and create new frames.
2768 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
2769 /* read */
2770 Register unroll_block_reg,
2771 /* invalidate */
2772 Register frame_sizes_reg,
2773 Register number_of_frames_reg,
2774 Register pcs_reg,
2775 Register frame_size_reg,
2776 Register pc_reg) {
2777 Label loop;
2778
2779 // _number_of_frames is of type int (deoptimization.hpp)
2780 __ lwa(number_of_frames_reg,
2781 in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()),
2782 unroll_block_reg);
2783 __ ld(pcs_reg,
2784 in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()),
2785 unroll_block_reg);
2786 __ ld(frame_sizes_reg,
2787 in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()),
2788 unroll_block_reg);
2789
2790 // stack: (caller_of_deoptee, ...).
2791
2792 // At this point we either have an interpreter frame or a compiled
2793 // frame on top of stack. If it is a compiled frame we push a new c2i
2794 // adapter here
2795
2796 // Memorize top-frame stack-pointer.
2797 __ mr(frame_size_reg/*old_sp*/, R1_SP);
2798
2799 // Resize interpreter top frame OR C2I adapter.
2800
2801 // At this moment, the top frame (which is the caller of the deoptee) is
2802 // an interpreter frame or a newly pushed C2I adapter or an entry frame.
2803 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
2804 // outgoing arguments.
2805 //
2806 // In order to push the interpreter frame for the deoptee, we need to
2807 // resize the top frame such that we are able to place the deoptee's
2808 // locals in the frame.
2809 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
2810 // into a valid PARENT_IJAVA_FRAME_ABI.
2811
2812 __ lwa(R11_scratch1,
2813 in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()),
2814 unroll_block_reg);
2815 __ neg(R11_scratch1, R11_scratch1);
2816
2817 // R11_scratch1 contains size of locals for frame resizing.
2818 // R12_scratch2 contains top frame's lr.
2819
2820 // Resize frame by complete frame size prevents TOC from being
2821 // overwritten by locals. A more stack space saving way would be
2822 // to copy the TOC to its location in the new abi.
2823 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
2824
2825 // now, resize the frame
2826 __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
2827
2828 // In the case where we have resized a c2i frame above, the optional
2829 // alignment below the locals has size 32 (why?).
2830 __ std(R12_scratch2, _abi0(lr), R1_SP);
2831
2832 // Initialize initial_caller_sp.
2833 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
2834
2835 #ifdef ASSERT
2836 // Make sure that there is at least one entry in the array.
2837 __ cmpdi(CR0, number_of_frames_reg, 0);
2838 __ asm_assert_ne("array_size must be > 0");
2839 #endif
2840
2841 // Now push the new interpreter frames.
2842 //
2843 __ bind(loop);
2844 // Allocate a new frame, fill in the pc.
2845 push_skeleton_frame(masm, deopt,
2846 unroll_block_reg,
2847 frame_sizes_reg,
2848 number_of_frames_reg,
2849 pcs_reg,
2850 frame_size_reg,
2851 pc_reg);
2852 __ cmpdi(CR0, number_of_frames_reg, 0);
2853 __ bne(CR0, loop);
2854
2855 // Get the return address pointing into the template interpreter.
2856 __ ld(R0, 0, pcs_reg);
2857 // Store it in the top interpreter frame.
2858 __ std(R0, _abi0(lr), R1_SP);
2859 // Initialize frame_manager_lr of interpreter top frame.
2860 }
2861 #endif
2862
2863 void SharedRuntime::generate_deopt_blob() {
2864 // Allocate space for the code
2865 ResourceMark rm;
2866 // Setup code generation tools
2867 const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2868 CodeBuffer buffer(name, 2048, 1024);
2869 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
2870 Label exec_mode_initialized;
2871 OopMap* map = nullptr;
2872 OopMapSet *oop_maps = new OopMapSet();
2873
2874 // size of ABI112 plus spill slots for R3_RET and F1_RET.
2875 const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size;
2876 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
2877 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
2878
2879 const Register exec_mode_reg = R21_tmp1;
2880
2881 const address start = __ pc();
2882 int exception_offset = 0;
2883 int exception_in_tls_offset = 0;
2884 int reexecute_offset = 0;
2885
2886 #if defined(COMPILER1) || defined(COMPILER2)
2887 // --------------------------------------------------------------------------
2888 // Prolog for non exception case!
2889
2890 // We have been called from the deopt handler of the deoptee.
2891 //
2892 // deoptee:
2893 // ...
2894 // call X
2895 // ...
2896 // deopt_handler: call_deopt_stub
2897 // cur. return pc --> ...
2898 //
2899 // The return_pc has been stored in the frame of the deoptee and
2900 // will replace the address of the deopt_handler in the call
2901 // to Deoptimization::fetch_unroll_info below.
2902
2903 // Push the "unpack frame"
2904 // Save everything in sight.
2905 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2906 &first_frame_size_in_bytes,
2907 /*generate_oop_map=*/ true,
2908 RegisterSaver::return_pc_is_lr,
2909 /*save_vectors*/ SuperwordUseVSX);
2910 assert(map != nullptr, "OopMap must have been created");
2911
2912 __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
2913 // Save exec mode for unpack_frames.
2914 __ b(exec_mode_initialized);
2915
2916 // --------------------------------------------------------------------------
2917 // Prolog for exception case
2918
2919 // An exception is pending.
2920 // We have been called with a return (interpreter) or a jump (exception blob).
2921 //
2922 // - R3_ARG1: exception oop
2923 // - R4_ARG2: exception pc
2924
2925 exception_offset = __ pc() - start;
2926
2927 BLOCK_COMMENT("Prolog for exception case");
2928
2929 // Store exception oop and pc in thread (location known to GC).
2930 // This is needed since the call to "fetch_unroll_info()" may safepoint.
2931 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2932 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2933 __ std(R4_ARG2, _abi0(lr), R1_SP);
2934
2935 // Vanilla deoptimization with an exception pending in exception_oop.
2936 exception_in_tls_offset = __ pc() - start;
2937
2938 // Push the "unpack frame".
2939 // Save everything in sight.
2940 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2941 &first_frame_size_in_bytes,
2942 /*generate_oop_map=*/ false,
2943 RegisterSaver::return_pc_is_pre_saved,
2944 /*save_vectors*/ SuperwordUseVSX);
2945
2946 // Deopt during an exception. Save exec mode for unpack_frames.
2947 __ li(exec_mode_reg, Deoptimization::Unpack_exception);
2948
2949 // fall through
2950 #ifdef COMPILER1
2951 __ b(exec_mode_initialized);
2952
2953 // Reexecute entry, similar to c2 uncommon trap
2954 reexecute_offset = __ pc() - start;
2955
2956 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2957 &first_frame_size_in_bytes,
2958 /*generate_oop_map=*/ false,
2959 RegisterSaver::return_pc_is_pre_saved,
2960 /*save_vectors*/ SuperwordUseVSX);
2961 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
2962 #endif
2963
2964 // --------------------------------------------------------------------------
2965 __ BIND(exec_mode_initialized);
2966
2967 const Register unroll_block_reg = R22_tmp2;
2968
2969 // We need to set `last_Java_frame' because `fetch_unroll_info' will
2970 // call `last_Java_frame()'. The value of the pc in the frame is not
2971 // particularly important. It just needs to identify this blob.
2972 __ set_last_Java_frame(R1_SP, noreg);
2973
2974 // With EscapeAnalysis turned on, this call may safepoint!
2975 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg);
2976 address calls_return_pc = __ last_calls_return_pc();
2977 // Set an oopmap for the call site that describes all our saved registers.
2978 oop_maps->add_gc_map(calls_return_pc - start, map);
2979
2980 __ reset_last_Java_frame();
2981 // Save the return value.
2982 __ mr(unroll_block_reg, R3_RET);
2983
2984 // Restore only the result registers that have been saved
2985 // by save_volatile_registers(...).
2986 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes, /*save_vectors*/ SuperwordUseVSX);
2987
2988 // reload the exec mode from the UnrollBlock (it might have changed)
2989 __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
2990 // In excp_deopt_mode, restore and clear exception oop which we
2991 // stored in the thread during exception entry above. The exception
2992 // oop will be the return value of this stub.
2993 Label skip_restore_excp;
2994 __ cmpdi(CR0, exec_mode_reg, Deoptimization::Unpack_exception);
2995 __ bne(CR0, skip_restore_excp);
2996 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2997 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2998 __ li(R0, 0);
2999 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
3000 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
3001 __ BIND(skip_restore_excp);
3002
3003 __ pop_frame();
3004
3005 // stack: (deoptee, optional i2c, caller of deoptee, ...).
3006
3007 // pop the deoptee's frame
3008 __ pop_frame();
3009
3010 // stack: (caller_of_deoptee, ...).
3011
3012 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
3013 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
3014 // and the frame is effectively not resized.
3015 Register caller_sp = R23_tmp3;
3016 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
3017 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
3018
3019 // Loop through the `UnrollBlock' info and create interpreter frames.
3020 push_skeleton_frames(masm, true/*deopt*/,
3021 unroll_block_reg,
3022 R23_tmp3,
3023 R24_tmp4,
3024 R25_tmp5,
3025 R26_tmp6,
3026 R27_tmp7);
3027
3028 // stack: (skeletal interpreter frame, ..., optional skeletal
3029 // interpreter frame, optional c2i, caller of deoptee, ...).
3030
3031 // push an `unpack_frame' taking care of float / int return values.
3032 __ push_frame(frame_size_in_bytes, R0/*tmp*/);
3033
3034 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3035 // skeletal interpreter frame, optional c2i, caller of deoptee,
3036 // ...).
3037
3038 // Spill live volatile registers since we'll do a call.
3039 __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP);
3040 __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
3041
3042 // Let the unpacker layout information in the skeletal frames just
3043 // allocated.
3044 __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true);
3045 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
3046 // This is a call to a LEAF method, so no oop map is required.
3047 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3048 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
3049 __ reset_last_Java_frame();
3050
3051 // Restore the volatiles saved above.
3052 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP);
3053 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
3054
3055 // Pop the unpack frame.
3056 __ pop_frame();
3057 __ restore_LR(R0);
3058
3059 // stack: (top interpreter frame, ..., optional interpreter frame,
3060 // optional c2i, caller of deoptee, ...).
3061
3062 // Initialize R14_state.
3063 __ restore_interpreter_state(R11_scratch1);
3064 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3065
3066 // Return to the interpreter entry point.
3067 __ blr();
3068 #else // !defined(COMPILER1) && !defined(COMPILER2)
3069 __ unimplemented("deopt blob needed only with compiler");
3070 #endif
3071
3072 // Make sure all code is generated
3073 __ flush();
3074
3075 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
3076 reexecute_offset, first_frame_size_in_bytes / wordSize);
3077 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3078 }
3079
3080 #ifdef COMPILER2
3081 UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
3082 // Allocate space for the code.
3083 ResourceMark rm;
3084 // Setup code generation tools.
3085 const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id);
3086 CodeBuffer buffer(name, 2048, 1024);
3087 if (buffer.blob() == nullptr) {
3088 return nullptr;
3089 }
3090 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
3091 address start = __ pc();
3092
3093 Register unroll_block_reg = R21_tmp1;
3094 Register klass_index_reg = R22_tmp2;
3095 Register unc_trap_reg = R23_tmp3;
3096 Register r_return_pc = R27_tmp7;
3097
3098 OopMapSet* oop_maps = new OopMapSet();
3099 int frame_size_in_bytes = frame::native_abi_reg_args_size;
3100 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
3101
3102 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3103
3104 // Push a dummy `unpack_frame' and call
3105 // `Deoptimization::uncommon_trap' to pack the compiled frame into a
3106 // vframe array and return the `UnrollBlock' information.
3107
3108 // Save LR to compiled frame.
3109 __ save_LR(R11_scratch1);
3110
3111 // Push an "uncommon_trap" frame.
3112 __ push_frame_reg_args(0, R11_scratch1);
3113
3114 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
3115
3116 // Set the `unpack_frame' as last_Java_frame.
3117 // `Deoptimization::uncommon_trap' expects it and considers its
3118 // sender frame as the deoptee frame.
3119 // Remember the offset of the instruction whose address will be
3120 // moved to R11_scratch1.
3121 address gc_map_pc = __ pc();
3122 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true);
3123 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);
3124
3125 __ mr(klass_index_reg, R3);
3126 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap);
3127 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
3128 R16_thread, klass_index_reg, R5_ARG3);
3129
3130 // Set an oopmap for the call site.
3131 oop_maps->add_gc_map(gc_map_pc - start, map);
3132
3133 __ reset_last_Java_frame();
3134
3135 // Pop the `unpack frame'.
3136 __ pop_frame();
3137
3138 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3139
3140 // Save the return value.
3141 __ mr(unroll_block_reg, R3_RET);
3142
3143 // Pop the uncommon_trap frame.
3144 __ pop_frame();
3145
3146 // stack: (caller_of_deoptee, ...).
3147
3148 #ifdef ASSERT
3149 __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
3150 __ cmpdi(CR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
3151 __ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
3152 #endif
3153
3154 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
3155 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
3156 // and the frame is effectively not resized.
3157 Register caller_sp = R23_tmp3;
3158 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
3159 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
3160
3161 // Allocate new interpreter frame(s) and possibly a c2i adapter
3162 // frame.
3163 push_skeleton_frames(masm, false/*deopt*/,
3164 unroll_block_reg,
3165 R22_tmp2,
3166 R23_tmp3,
3167 R24_tmp4,
3168 R25_tmp5,
3169 R26_tmp6);
3170
3171 // stack: (skeletal interpreter frame, ..., optional skeletal
3172 // interpreter frame, optional c2i, caller of deoptee, ...).
3173
3174 // Push a dummy `unpack_frame' taking care of float return values.
3175 // Call `Deoptimization::unpack_frames' to layout information in the
3176 // interpreter frames just created.
3177
3178 // Push a simple "unpack frame" here.
3179 __ push_frame_reg_args(0, R11_scratch1);
3180
3181 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3182 // skeletal interpreter frame, optional c2i, caller of deoptee,
3183 // ...).
3184
3185 // Set the "unpack_frame" as last_Java_frame.
3186 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);
3187
3188 // Indicate it is the uncommon trap case.
3189 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
3190 // Let the unpacker layout information in the skeletal frames just
3191 // allocated.
3192 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3193 R16_thread, unc_trap_reg);
3194
3195 __ reset_last_Java_frame();
3196 // Pop the `unpack frame'.
3197 __ pop_frame();
3198 // Restore LR from top interpreter frame.
3199 __ restore_LR(R11_scratch1);
3200
3201 // stack: (top interpreter frame, ..., optional interpreter frame,
3202 // optional c2i, caller of deoptee, ...).
3203
3204 __ restore_interpreter_state(R11_scratch1);
3205 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3206
3207 // Return to the interpreter entry point.
3208 __ blr();
3209
3210 masm->flush();
3211
3212 return UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
3213 }
3214 #endif // COMPILER2
3215
3216 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap.
3217 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
3218 assert(StubRoutines::forward_exception_entry() != nullptr,
3219 "must be generated before");
3220 assert(is_polling_page_id(id), "expected a polling page stub id");
3221
3222 ResourceMark rm;
3223 OopMapSet *oop_maps = new OopMapSet();
3224 OopMap* map;
3225
3226 // Allocate space for the code. Setup code generation tools.
3227 const char* name = SharedRuntime::stub_name(id);
3228 CodeBuffer buffer(name, 2048, 1024);
3229 MacroAssembler* masm = new MacroAssembler(&buffer);
3230
3231 address start = __ pc();
3232 int frame_size_in_bytes = 0;
3233
3234 RegisterSaver::ReturnPCLocation return_pc_location;
3235 bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
3236 if (cause_return) {
3237 // Nothing to do here. The frame has already been popped in MachEpilogNode.
3238 // Register LR already contains the return pc.
3239 return_pc_location = RegisterSaver::return_pc_is_pre_saved;
3240 } else {
3241 // Use thread()->saved_exception_pc() as return pc.
3242 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3243 }
3244
3245 bool save_vectors = (id == StubId::shared_polling_page_vectors_safepoint_handler_id);
3246
3247 // Save registers, fpu state, and flags. Set R31 = return pc.
3248 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3249 &frame_size_in_bytes,
3250 /*generate_oop_map=*/ true,
3251 return_pc_location, save_vectors);
3252
3253 // The following is basically a call_VM. However, we need the precise
3254 // address of the call in order to generate an oopmap. Hence, we do all the
3255 // work ourselves.
3256 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3257
3258 // The return address must always be correct so that the frame constructor
3259 // never sees an invalid pc.
3260
3261 // Do the call
3262 __ call_VM_leaf(call_ptr, R16_thread);
3263 address calls_return_pc = __ last_calls_return_pc();
3264
3265 // Set an oopmap for the call site. This oopmap will map all
3266 // oop-registers and debug-info registers as callee-saved. This
3267 // will allow deoptimization at this safepoint to find all possible
3268 // debug-info recordings, as well as let GC find all oops.
3269 oop_maps->add_gc_map(calls_return_pc - start, map);
3270
3271 Label noException;
3272
3273 // Clear the last Java frame.
3274 __ reset_last_Java_frame();
3275
3276 BLOCK_COMMENT(" Check pending exception.");
3277 const Register pending_exception = R0;
3278 __ ld(pending_exception, thread_(pending_exception));
3279 __ cmpdi(CR0, pending_exception, 0);
3280 __ beq(CR0, noException);
3281
3282 // Exception pending
3283 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3284 frame_size_in_bytes,
3285 /*restore_ctr=*/true, save_vectors);
3286
3287 BLOCK_COMMENT(" Jump to forward_exception_entry.");
3288 // Jump to forward_exception_entry, with the issuing PC in LR
3289 // so it looks like the original nmethod called forward_exception_entry.
3290 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3291
3292 // No exception case.
3293 __ BIND(noException);
3294
3295 if (!cause_return) {
3296 Label no_adjust;
3297 // If our stashed return pc was modified by the runtime we avoid touching it
3298 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP);
3299 __ cmpd(CR0, R0, R31);
3300 __ bne(CR0, no_adjust);
3301
3302 // Adjust return pc forward to step over the safepoint poll instruction
3303 __ addi(R31, R31, 4);
3304 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
3305
3306 __ bind(no_adjust);
3307 }
3308
3309 // Normal exit, restore registers and exit.
3310 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3311 frame_size_in_bytes,
3312 /*restore_ctr=*/true, save_vectors);
3313
3314 __ blr();
3315
3316 // Make sure all code is generated
3317 masm->flush();
3318
3319 // Fill-out other meta info
3320 // CodeBlob frame size is in words.
3321 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
3322 }
3323
3324 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3325 //
3326 // Generate a stub that calls into the vm to find out the proper destination
3327 // of a java call. All the argument registers are live at this point
3328 // but since this is generic code we don't know what they are and the caller
3329 // must do any gc of the args.
3330 //
3331 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
3332 assert(is_resolve_id(id), "expected a resolve stub id");
3333
3334 // allocate space for the code
3335 ResourceMark rm;
3336
3337 const char* name = SharedRuntime::stub_name(id);
3338 CodeBuffer buffer(name, 1000, 512);
3339 MacroAssembler* masm = new MacroAssembler(&buffer);
3340
3341 int frame_size_in_bytes;
3342
3343 OopMapSet *oop_maps = new OopMapSet();
3344 OopMap* map = nullptr;
3345
3346 address start = __ pc();
3347
3348 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3349 &frame_size_in_bytes,
3350 /*generate_oop_map*/ true,
3351 RegisterSaver::return_pc_is_lr);
3352
3353 // Use noreg as last_Java_pc, the return pc will be reconstructed
3354 // from the physical frame.
3355 __ set_last_Java_frame(/*sp*/R1_SP, noreg);
3356
3357 int frame_complete = __ offset();
3358
3359 // Pass R19_method as 2nd (optional) argument, used by
3360 // counter_overflow_stub.
3361 __ call_VM_leaf(destination, R16_thread, R19_method);
3362 address calls_return_pc = __ last_calls_return_pc();
3363 // Set an oopmap for the call site.
3364 // We need this not only for callee-saved registers, but also for volatile
3365 // registers that the compiler might be keeping live across a safepoint.
3366 // Create the oopmap for the call's return pc.
3367 oop_maps->add_gc_map(calls_return_pc - start, map);
3368
3369 // R3_RET contains the address we are going to jump to assuming no exception got installed.
3370
3371 // clear last_Java_sp
3372 __ reset_last_Java_frame();
3373
3374 // Check for pending exceptions.
3375 BLOCK_COMMENT("Check for pending exceptions.");
3376 Label pending;
3377 __ ld(R11_scratch1, thread_(pending_exception));
3378 __ cmpdi(CR0, R11_scratch1, 0);
3379 __ bne(CR0, pending);
3380
3381 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
3382
3383 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
3384
3385 // Get the returned method.
3386 __ get_vm_result_metadata(R19_method);
3387
3388 __ bctr();
3389
3390
3391 // Pending exception after the safepoint.
3392 __ BIND(pending);
3393
3394 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);
3395
3396 // exception pending => remove activation and forward to exception handler
3397
3398 __ li(R11_scratch1, 0);
3399 __ ld(R3_ARG1, thread_(pending_exception));
3400 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_oop_offset()), R16_thread);
3401 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3402
3403 // -------------
3404 // Make sure all code is generated.
3405 masm->flush();
3406
3407 // return the blob
3408 // frame_size_words or bytes??
3409 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
3410 oop_maps, true);
3411 }
3412
3413 // Continuation point for throwing of implicit exceptions that are
3414 // not handled in the current activation. Fabricates an exception
3415 // oop and initiates normal exception dispatching in this
3416 // frame. Only callee-saved registers are preserved (through the
3417 // normal register window / RegisterMap handling). If the compiler
3418 // needs all registers to be preserved between the fault point and
3419 // the exception handler then it must assume responsibility for that
3420 // in AbstractCompiler::continuation_for_implicit_null_exception or
3421 // continuation_for_implicit_division_by_zero_exception. All other
3422 // implicit exceptions (e.g., NullPointerException or
3423 // AbstractMethodError on entry) are either at call sites or
3424 // otherwise assume that stack unwinding will be initiated, so
3425 // caller saved registers were assumed volatile in the compiler.
3426 //
3427 // Note that we generate only this stub into a RuntimeStub, because
3428 // it needs to be properly traversed and ignored during GC, so we
3429 // change the meaning of the "__" macro within this method.
3430 //
3431 // Note: the routine set_pc_not_at_call_for_caller in
3432 // SharedRuntime.cpp requires that this code be generated into a
3433 // RuntimeStub.
3434 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
3435 assert(is_throw_id(id), "expected a throw stub id");
3436
3437 const char* name = SharedRuntime::stub_name(id);
3438
3439 ResourceMark rm;
3440 const char* timer_msg = "SharedRuntime generate_throw_exception";
3441 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
3442
3443 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
3444 MacroAssembler* masm = new MacroAssembler(&code);
3445
3446 OopMapSet* oop_maps = new OopMapSet();
3447 int frame_size_in_bytes = frame::native_abi_reg_args_size;
3448 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
3449
3450 address start = __ pc();
3451
3452 __ save_LR(R11_scratch1);
3453
3454 // Push a frame.
3455 __ push_frame_reg_args(0, R11_scratch1);
3456
3457 address frame_complete_pc = __ pc();
3458
3459 // Note that we always have a runtime stub frame on the top of
3460 // stack by this point. Remember the offset of the instruction
3461 // whose address will be moved to R11_scratch1.
3462 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
3463
3464 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
3465
3466 __ mr(R3_ARG1, R16_thread);
3467 __ call_c(runtime_entry);
3468
3469 // Set an oopmap for the call site.
3470 oop_maps->add_gc_map((int)(gc_map_pc - start), map);
3471
3472 __ reset_last_Java_frame();
3473
3474 #ifdef ASSERT
3475 // Make sure that this code is only executed if there is a pending
3476 // exception.
3477 {
3478 Label L;
3479 __ ld(R0,
3480 in_bytes(Thread::pending_exception_offset()),
3481 R16_thread);
3482 __ cmpdi(CR0, R0, 0);
3483 __ bne(CR0, L);
3484 __ stop("SharedRuntime::throw_exception: no pending exception");
3485 __ bind(L);
3486 }
3487 #endif
3488
3489 // Pop frame.
3490 __ pop_frame();
3491
3492 __ restore_LR(R11_scratch1);
3493
3494 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
3495 __ mtctr(R11_scratch1);
3496 __ bctr();
3497
3498 // Create runtime stub with OopMap.
3499 RuntimeStub* stub =
3500 RuntimeStub::new_runtime_stub(name, &code,
3501 /*frame_complete=*/ (int)(frame_complete_pc - start),
3502 frame_size_in_bytes/wordSize,
3503 oop_maps,
3504 false);
3505 return stub;
3506 }
3507
3508 //------------------------------Montgomery multiplication------------------------
3509 //
3510
3511 // Subtract 0:b from carry:a. Return carry.
3512 static unsigned long
3513 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3514 long i = 0;
3515 unsigned long tmp, tmp2;
3516 __asm__ __volatile__ (
3517 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA
3518 "mtctr %[len] \n"
3519 "0: \n"
3520 "ldx %[tmp], %[i], %[a] \n"
3521 "ldx %[tmp2], %[i], %[b] \n"
3522 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended
3523 "stdx %[tmp], %[i], %[a] \n"
3524 "addi %[i], %[i], 8 \n"
3525 "bdnz 0b \n"
3526 "addme %[tmp], %[carry] \n" // carry + CA - 1
3527 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
3528 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
3529 : "ctr", "xer", "memory"
3530 );
3531 return tmp;
3532 }
3533
3534 // Multiply (unsigned) Long A by Long B, accumulating the double-
3535 // length result into the accumulator formed of T0, T1, and T2.
3536 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
3537 unsigned long hi, lo;
3538 __asm__ __volatile__ (
3539 "mulld %[lo], %[A], %[B] \n"
3540 "mulhdu %[hi], %[A], %[B] \n"
3541 "addc %[T0], %[T0], %[lo] \n"
3542 "adde %[T1], %[T1], %[hi] \n"
3543 "addze %[T2], %[T2] \n"
3544 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
3545 : [A]"r"(A), [B]"r"(B)
3546 : "xer"
3547 );
3548 }
3549
3550 // As above, but add twice the double-length result into the
3551 // accumulator.
3552 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
3553 unsigned long hi, lo;
3554 __asm__ __volatile__ (
3555 "mulld %[lo], %[A], %[B] \n"
3556 "mulhdu %[hi], %[A], %[B] \n"
3557 "addc %[T0], %[T0], %[lo] \n"
3558 "adde %[T1], %[T1], %[hi] \n"
3559 "addze %[T2], %[T2] \n"
3560 "addc %[T0], %[T0], %[lo] \n"
3561 "adde %[T1], %[T1], %[hi] \n"
3562 "addze %[T2], %[T2] \n"
3563 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
3564 : [A]"r"(A), [B]"r"(B)
3565 : "xer"
3566 );
3567 }
3568
3569 // Fast Montgomery multiplication. The derivation of the algorithm is
3570 // in "A Cryptographic Library for the Motorola DSP56000,
3571 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237".
3572 static void
3573 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
3574 unsigned long m[], unsigned long inv, int len) {
3575 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3576 int i;
3577
3578 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3579
3580 for (i = 0; i < len; i++) {
3581 int j;
3582 for (j = 0; j < i; j++) {
3583 MACC(a[j], b[i-j], t0, t1, t2);
3584 MACC(m[j], n[i-j], t0, t1, t2);
3585 }
3586 MACC(a[i], b[0], t0, t1, t2);
3587 m[i] = t0 * inv;
3588 MACC(m[i], n[0], t0, t1, t2);
3589
3590 assert(t0 == 0, "broken Montgomery multiply");
3591
3592 t0 = t1; t1 = t2; t2 = 0;
3593 }
3594
3595 for (i = len; i < 2*len; i++) {
3596 int j;
3597 for (j = i-len+1; j < len; j++) {
3598 MACC(a[j], b[i-j], t0, t1, t2);
3599 MACC(m[j], n[i-j], t0, t1, t2);
3600 }
3601 m[i-len] = t0;
3602 t0 = t1; t1 = t2; t2 = 0;
3603 }
3604
3605 while (t0) {
3606 t0 = sub(m, n, t0, len);
3607 }
3608 }
3609
3610 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3611 // multiplies so it should be up to 25% faster than Montgomery
3612 // multiplication. However, its loop control is more complex and it
3613 // may actually run slower on some machines.
3614 static void
3615 montgomery_square(unsigned long a[], unsigned long n[],
3616 unsigned long m[], unsigned long inv, int len) {
3617 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3618 int i;
3619
3620 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3621
3622 for (i = 0; i < len; i++) {
3623 int j;
3624 int end = (i+1)/2;
3625 for (j = 0; j < end; j++) {
3626 MACC2(a[j], a[i-j], t0, t1, t2);
3627 MACC(m[j], n[i-j], t0, t1, t2);
3628 }
3629 if ((i & 1) == 0) {
3630 MACC(a[j], a[j], t0, t1, t2);
3631 }
3632 for (; j < i; j++) {
3633 MACC(m[j], n[i-j], t0, t1, t2);
3634 }
3635 m[i] = t0 * inv;
3636 MACC(m[i], n[0], t0, t1, t2);
3637
3638 assert(t0 == 0, "broken Montgomery square");
3639
3640 t0 = t1; t1 = t2; t2 = 0;
3641 }
3642
3643 for (i = len; i < 2*len; i++) {
3644 int start = i-len+1;
3645 int end = start + (len - start)/2;
3646 int j;
3647 for (j = start; j < end; j++) {
3648 MACC2(a[j], a[i-j], t0, t1, t2);
3649 MACC(m[j], n[i-j], t0, t1, t2);
3650 }
3651 if ((i & 1) == 0) {
3652 MACC(a[j], a[j], t0, t1, t2);
3653 }
3654 for (; j < len; j++) {
3655 MACC(m[j], n[i-j], t0, t1, t2);
3656 }
3657 m[i-len] = t0;
3658 t0 = t1; t1 = t2; t2 = 0;
3659 }
3660
3661 while (t0) {
3662 t0 = sub(m, n, t0, len);
3663 }
3664 }
3665
3666 // The threshold at which squaring is advantageous was determined
3667 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3668 // Doesn't seem to be relevant for Power8 so we use the same value.
3669 #define MONTGOMERY_SQUARING_THRESHOLD 64
3670
3671 // Copy len longwords from s to d, word-swapping as we go. The
3672 // destination array is reversed.
3673 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
3674 d += len;
3675 while(len-- > 0) {
3676 d--;
3677 unsigned long s_val = *s;
3678 // Swap words in a longword on little endian machines.
3679 #ifdef VM_LITTLE_ENDIAN
3680 s_val = (s_val << 32) | (s_val >> 32);
3681 #endif
3682 *d = s_val;
3683 s++;
3684 }
3685 }
3686
3687 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3688 jint len, jlong inv,
3689 jint *m_ints) {
3690 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3691 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3692 int longwords = len/2;
3693
3694 // Make very sure we don't use so much space that the stack might
3695 // overflow. 512 jints corresponds to an 16384-bit integer and
3696 // will use here a total of 8k bytes of stack space.
3697 int divisor = sizeof(unsigned long) * 4;
3698 guarantee(longwords <= 8192 / divisor, "must be");
3699 int total_allocation = longwords * sizeof (unsigned long) * 4;
3700 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3701
3702 // Local scratch arrays
3703 unsigned long
3704 *a = scratch + 0 * longwords,
3705 *b = scratch + 1 * longwords,
3706 *n = scratch + 2 * longwords,
3707 *m = scratch + 3 * longwords;
3708
3709 reverse_words((unsigned long *)a_ints, a, longwords);
3710 reverse_words((unsigned long *)b_ints, b, longwords);
3711 reverse_words((unsigned long *)n_ints, n, longwords);
3712
3713 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
3714
3715 reverse_words(m, (unsigned long *)m_ints, longwords);
3716 }
3717
3718 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3719 jint len, jlong inv,
3720 jint *m_ints) {
3721 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3722 assert(len % 2 == 0, "array length in montgomery_square must be even");
3723 int longwords = len/2;
3724
3725 // Make very sure we don't use so much space that the stack might
3726 // overflow. 512 jints corresponds to an 16384-bit integer and
3727 // will use here a total of 6k bytes of stack space.
3728 int divisor = sizeof(unsigned long) * 3;
3729 guarantee(longwords <= (8192 / divisor), "must be");
3730 int total_allocation = longwords * sizeof (unsigned long) * 3;
3731 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3732
3733 // Local scratch arrays
3734 unsigned long
3735 *a = scratch + 0 * longwords,
3736 *n = scratch + 1 * longwords,
3737 *m = scratch + 2 * longwords;
3738
3739 reverse_words((unsigned long *)a_ints, a, longwords);
3740 reverse_words((unsigned long *)n_ints, n, longwords);
3741
3742 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3743 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
3744 } else {
3745 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
3746 }
3747
3748 reverse_words(m, (unsigned long *)m_ints, longwords);
3749 }
3750
3751 #if INCLUDE_JFR
3752
3753 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3754 // It returns a jobject handle to the event writer.
3755 // The handle is dereferenced and the return value is the event writer oop.
3756 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3757 const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
3758 CodeBuffer code(name, 512, 64);
3759 MacroAssembler* masm = new MacroAssembler(&code);
3760
3761 Register tmp1 = R10_ARG8;
3762 Register tmp2 = R9_ARG7;
3763
3764 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
3765 address start = __ pc();
3766 __ mflr(tmp1);
3767 __ std(tmp1, _abi0(lr), R1_SP); // save return pc
3768 __ push_frame_reg_args(0, tmp1);
3769 int frame_complete = __ pc() - start;
3770 __ set_last_Java_frame(R1_SP, noreg);
3771 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread);
3772 address calls_return_pc = __ last_calls_return_pc();
3773 __ reset_last_Java_frame();
3774 // The handle is dereferenced through a load barrier.
3775 __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
3776 __ pop_frame();
3777 __ ld(tmp1, _abi0(lr), R1_SP);
3778 __ mtlr(tmp1);
3779 __ blr();
3780
3781 OopMapSet* oop_maps = new OopMapSet();
3782 OopMap* map = new OopMap(framesize, 0);
3783 oop_maps->add_gc_map(calls_return_pc - start, map);
3784
3785 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3786 RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3787 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3788 oop_maps, false);
3789 return stub;
3790 }
3791
3792 // For c2: call to return a leased buffer.
3793 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3794 const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
3795 CodeBuffer code(name, 512, 64);
3796 MacroAssembler* masm = new MacroAssembler(&code);
3797
3798 Register tmp1 = R10_ARG8;
3799 Register tmp2 = R9_ARG7;
3800
3801 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
3802 address start = __ pc();
3803 __ mflr(tmp1);
3804 __ std(tmp1, _abi0(lr), R1_SP); // save return pc
3805 __ push_frame_reg_args(0, tmp1);
3806 int frame_complete = __ pc() - start;
3807 __ set_last_Java_frame(R1_SP, noreg);
3808 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread);
3809 address calls_return_pc = __ last_calls_return_pc();
3810 __ reset_last_Java_frame();
3811 __ pop_frame();
3812 __ ld(tmp1, _abi0(lr), R1_SP);
3813 __ mtlr(tmp1);
3814 __ blr();
3815
3816 OopMapSet* oop_maps = new OopMapSet();
3817 OopMap* map = new OopMap(framesize, 0);
3818 oop_maps->add_gc_map(calls_return_pc - start, map);
3819
3820 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3821 RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3822 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3823 oop_maps, false);
3824 return stub;
3825 }
3826
3827 #endif // INCLUDE_JFR