1 /*
2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/debugInfoRec.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/vtableStubs.hpp"
32 #include "frame_ppc.hpp"
33 #include "compiler/oopMap.hpp"
34 #include "gc/shared/gcLocker.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "interpreter/interp_masm.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "oops/compiledICHolder.hpp"
39 #include "oops/klass.inline.hpp"
40 #include "prims/methodHandles.hpp"
41 #include "runtime/continuation.hpp"
42 #include "runtime/continuationEntry.inline.hpp"
43 #include "runtime/jniHandles.hpp"
44 #include "runtime/os.inline.hpp"
45 #include "runtime/safepointMechanism.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/signature.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "runtime/vframeArray.hpp"
50 #include "utilities/align.hpp"
51 #include "utilities/macros.hpp"
52 #include "vmreg_ppc.inline.hpp"
53 #ifdef COMPILER1
54 #include "c1/c1_Runtime1.hpp"
55 #endif
56 #ifdef COMPILER2
57 #include "opto/ad.hpp"
58 #include "opto/runtime.hpp"
59 #endif
60
61 #include <alloca.h>
62
63 #define __ masm->
64
65 #ifdef PRODUCT
66 #define BLOCK_COMMENT(str) // nothing
67 #else
68 #define BLOCK_COMMENT(str) __ block_comment(str)
69 #endif
70
71 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
72
73
74 class RegisterSaver {
75 // Used for saving volatile registers.
76 public:
77
78 // Support different return pc locations.
79 enum ReturnPCLocation {
80 return_pc_is_lr,
81 return_pc_is_pre_saved,
82 return_pc_is_thread_saved_exception_pc
83 };
84
85 static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
86 int* out_frame_size_in_bytes,
87 bool generate_oop_map,
88 int return_pc_adjustment,
89 ReturnPCLocation return_pc_location,
90 bool save_vectors = false);
91 static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
92 int frame_size_in_bytes,
93 bool restore_ctr,
94 bool save_vectors = false);
95
96 static void push_frame_and_save_argument_registers(MacroAssembler* masm,
97 Register r_temp,
98 int frame_size,
99 int total_args,
100 const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
101 static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
102 int frame_size,
103 int total_args,
104 const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
105
106 // During deoptimization only the result registers need to be restored;
107 // all the other values have already been extracted.
108 static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);
109
110 // Constants and data structures:
111
112 typedef enum {
113 int_reg,
114 float_reg,
115 special_reg,
116 vs_reg
117 } RegisterType;
118
119 typedef enum {
120 reg_size = 8,
121 half_reg_size = reg_size / 2,
122 vs_reg_size = 16
123 } RegisterConstants;
124
125 typedef struct {
126 RegisterType reg_type;
127 int reg_num;
128 VMReg vmreg;
129 } LiveRegType;
130 };
131
132
133 #define RegisterSaver_LiveIntReg(regname) \
134 { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() }
135
136 #define RegisterSaver_LiveFloatReg(regname) \
137 { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() }
138
139 #define RegisterSaver_LiveSpecialReg(regname) \
140 { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }
141
142 #define RegisterSaver_LiveVSReg(regname) \
143 { RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() }
144
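// For illustration: RegisterSaver_LiveIntReg(R3) expands to
//   { RegisterSaver::int_reg, R3->encoding(), R3->as_VMReg() },
// i.e. each entry records the register type, its hardware encoding and its VMReg.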
145 static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
146 // Live registers which get spilled to the stack. Register
147 // positions in this array correspond directly to the stack layout.
148
149 //
150 // live special registers:
151 //
152 RegisterSaver_LiveSpecialReg(SR_CTR),
153 //
154 // live float registers:
155 //
156 RegisterSaver_LiveFloatReg( F0 ),
157 RegisterSaver_LiveFloatReg( F1 ),
158 RegisterSaver_LiveFloatReg( F2 ),
159 RegisterSaver_LiveFloatReg( F3 ),
160 RegisterSaver_LiveFloatReg( F4 ),
161 RegisterSaver_LiveFloatReg( F5 ),
162 RegisterSaver_LiveFloatReg( F6 ),
163 RegisterSaver_LiveFloatReg( F7 ),
164 RegisterSaver_LiveFloatReg( F8 ),
165 RegisterSaver_LiveFloatReg( F9 ),
166 RegisterSaver_LiveFloatReg( F10 ),
167 RegisterSaver_LiveFloatReg( F11 ),
168 RegisterSaver_LiveFloatReg( F12 ),
169 RegisterSaver_LiveFloatReg( F13 ),
170 RegisterSaver_LiveFloatReg( F14 ),
171 RegisterSaver_LiveFloatReg( F15 ),
172 RegisterSaver_LiveFloatReg( F16 ),
173 RegisterSaver_LiveFloatReg( F17 ),
174 RegisterSaver_LiveFloatReg( F18 ),
175 RegisterSaver_LiveFloatReg( F19 ),
176 RegisterSaver_LiveFloatReg( F20 ),
177 RegisterSaver_LiveFloatReg( F21 ),
178 RegisterSaver_LiveFloatReg( F22 ),
179 RegisterSaver_LiveFloatReg( F23 ),
180 RegisterSaver_LiveFloatReg( F24 ),
181 RegisterSaver_LiveFloatReg( F25 ),
182 RegisterSaver_LiveFloatReg( F26 ),
183 RegisterSaver_LiveFloatReg( F27 ),
184 RegisterSaver_LiveFloatReg( F28 ),
185 RegisterSaver_LiveFloatReg( F29 ),
186 RegisterSaver_LiveFloatReg( F30 ),
187 RegisterSaver_LiveFloatReg( F31 ),
188 //
189 // live integer registers:
190 //
191 RegisterSaver_LiveIntReg( R0 ),
192 //RegisterSaver_LiveIntReg( R1 ), // stack pointer
193 RegisterSaver_LiveIntReg( R2 ),
194 RegisterSaver_LiveIntReg( R3 ),
195 RegisterSaver_LiveIntReg( R4 ),
196 RegisterSaver_LiveIntReg( R5 ),
197 RegisterSaver_LiveIntReg( R6 ),
198 RegisterSaver_LiveIntReg( R7 ),
199 RegisterSaver_LiveIntReg( R8 ),
200 RegisterSaver_LiveIntReg( R9 ),
201 RegisterSaver_LiveIntReg( R10 ),
202 RegisterSaver_LiveIntReg( R11 ),
203 RegisterSaver_LiveIntReg( R12 ),
204 //RegisterSaver_LiveIntReg( R13 ), // system thread id
205 RegisterSaver_LiveIntReg( R14 ),
206 RegisterSaver_LiveIntReg( R15 ),
207 RegisterSaver_LiveIntReg( R16 ),
208 RegisterSaver_LiveIntReg( R17 ),
209 RegisterSaver_LiveIntReg( R18 ),
210 RegisterSaver_LiveIntReg( R19 ),
211 RegisterSaver_LiveIntReg( R20 ),
212 RegisterSaver_LiveIntReg( R21 ),
213 RegisterSaver_LiveIntReg( R22 ),
214 RegisterSaver_LiveIntReg( R23 ),
215 RegisterSaver_LiveIntReg( R24 ),
216 RegisterSaver_LiveIntReg( R25 ),
217 RegisterSaver_LiveIntReg( R26 ),
218 RegisterSaver_LiveIntReg( R27 ),
219 RegisterSaver_LiveIntReg( R28 ),
220 RegisterSaver_LiveIntReg( R29 ),
221 RegisterSaver_LiveIntReg( R30 ),
222 RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below)
223 };
224
225 static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = {
226 //
227 // live vector scalar registers (optional; only these are used by C2):
228 //
229 RegisterSaver_LiveVSReg( VSR32 ),
230 RegisterSaver_LiveVSReg( VSR33 ),
231 RegisterSaver_LiveVSReg( VSR34 ),
232 RegisterSaver_LiveVSReg( VSR35 ),
233 RegisterSaver_LiveVSReg( VSR36 ),
234 RegisterSaver_LiveVSReg( VSR37 ),
235 RegisterSaver_LiveVSReg( VSR38 ),
236 RegisterSaver_LiveVSReg( VSR39 ),
237 RegisterSaver_LiveVSReg( VSR40 ),
238 RegisterSaver_LiveVSReg( VSR41 ),
239 RegisterSaver_LiveVSReg( VSR42 ),
240 RegisterSaver_LiveVSReg( VSR43 ),
241 RegisterSaver_LiveVSReg( VSR44 ),
242 RegisterSaver_LiveVSReg( VSR45 ),
243 RegisterSaver_LiveVSReg( VSR46 ),
244 RegisterSaver_LiveVSReg( VSR47 ),
245 RegisterSaver_LiveVSReg( VSR48 ),
246 RegisterSaver_LiveVSReg( VSR49 ),
247 RegisterSaver_LiveVSReg( VSR50 ),
248 RegisterSaver_LiveVSReg( VSR51 )
249 };
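
// Rough sketch of the save area built by push_frame_reg_args_and_save_live_registers
// (exact offsets are computed in that function; the vector part only exists if
// save_vectors is true):
//
//   R1_SP + frame_size_in_bytes -> caller's frame
//     VSR32..VSR51 (optional)
//     R0, R2..R12, R14..R31
//     F0..F31
//   register_save_offset        -> CTR
//   R1_SP + 0                   -> native_abi_reg_args area of the new frame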
250
251
252 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
253 int* out_frame_size_in_bytes,
254 bool generate_oop_map,
255 int return_pc_adjustment,
256 ReturnPCLocation return_pc_location,
257 bool save_vectors) {
258 // Push an abi_reg_args-frame and store all registers which may be live.
259 // If requested, create an OopMap: Record volatile registers as
260 // callee-save values in an OopMap so their save locations will be
261 // propagated to the RegisterMap of the caller frame during
262 // StackFrameStream construction (needed for deoptimization; see
263 // compiledVFrame::create_stack_value).
264 // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
265 // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
266
267 // calculate frame size
268 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
269 sizeof(RegisterSaver::LiveRegType);
270 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
271 sizeof(RegisterSaver::LiveRegType))
272 : 0;
273 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
274 const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
275 + frame::native_abi_reg_args_size;
276
277 *out_frame_size_in_bytes = frame_size_in_bytes;
278 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
279 const int register_save_offset = frame_size_in_bytes - register_save_size;
280
281 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
282 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr;
283
284 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
285
286 // push a new frame
287 __ push_frame(frame_size_in_bytes, noreg);
288
289 // Save some registers in the last (non-vector) slots of the new frame so we
290 // can use them as scratch regs or to determine the return pc.
291 __ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);
292 __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP);
293
294 // save the flags
295 // Do the save_LR_CR by hand and adjust the return pc if requested.
296 __ mfcr(R30);
297 __ std(R30, frame_size_in_bytes + _abi0(cr), R1_SP);
298 switch (return_pc_location) {
299 case return_pc_is_lr: __ mflr(R31); break;
300 case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
301 case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
302 default: ShouldNotReachHere();
303 }
304 if (return_pc_location != return_pc_is_pre_saved) {
305 if (return_pc_adjustment != 0) {
306 __ addi(R31, R31, return_pc_adjustment);
307 }
308 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
309 }
310
311 // save all registers (ints and floats)
312 int offset = register_save_offset;
313
314 for (int i = 0; i < regstosave_num; i++) {
315 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
316 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
317
318 switch (reg_type) {
319 case RegisterSaver::int_reg: {
320 if (reg_num < 30) { // We spilled R30-31 right at the beginning.
321 __ std(as_Register(reg_num), offset, R1_SP);
322 }
323 break;
324 }
325 case RegisterSaver::float_reg: {
326 __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
327 break;
328 }
329 case RegisterSaver::special_reg: {
330 if (reg_num == SR_CTR.encoding()) {
331 __ mfctr(R30);
332 __ std(R30, offset, R1_SP);
333 } else {
334 Unimplemented();
335 }
336 break;
337 }
338 default:
339 ShouldNotReachHere();
340 }
341
342 if (generate_oop_map) {
343 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
344 RegisterSaver_LiveRegs[i].vmreg);
345 map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
346 RegisterSaver_LiveRegs[i].vmreg->next());
347 }
348 offset += reg_size;
349 }
350
351 for (int i = 0; i < vsregstosave_num; i++) {
352 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
353 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type;
354
355 __ li(R30, offset);
356 __ stxvd2x(as_VectorSRegister(reg_num), R30, R1_SP);
357
358 if (generate_oop_map) {
359 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
360 RegisterSaver_LiveVSRegs[i].vmreg);
361 }
362 offset += vs_reg_size;
363 }
364
365 assert(offset == frame_size_in_bytes, "consistency check");
366
367 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
368
369 // And we're done.
370 return map;
371 }
372
373
374 // Pop the current frame and restore all the registers that we
375 // saved.
376 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
377 int frame_size_in_bytes,
378 bool restore_ctr,
379 bool save_vectors) {
380 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
381 sizeof(RegisterSaver::LiveRegType);
382 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
383 sizeof(RegisterSaver::LiveRegType))
384 : 0;
385 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
386
387 const int register_save_offset = frame_size_in_bytes - register_save_size;
388
389 BLOCK_COMMENT("restore_live_registers_and_pop_frame {");
390
391 // restore all registers (ints and floats)
392 int offset = register_save_offset;
393
394 for (int i = 0; i < regstosave_num; i++) {
395 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
396 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
397
398 switch (reg_type) {
399 case RegisterSaver::int_reg: {
400 if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
401 __ ld(as_Register(reg_num), offset, R1_SP);
402 break;
403 }
404 case RegisterSaver::float_reg: {
405 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
406 break;
407 }
408 case RegisterSaver::special_reg: {
409 if (reg_num == SR_CTR.encoding()) {
410 if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
411 __ ld(R31, offset, R1_SP);
412 __ mtctr(R31);
413 }
414 } else {
415 Unimplemented();
416 }
417 break;
418 }
419 default:
420 ShouldNotReachHere();
421 }
422 offset += reg_size;
423 }
424
425 for (int i = 0; i < vsregstosave_num; i++) {
426 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
427 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type;
428
429 __ li(R31, offset);
430 __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);
431
432 offset += vs_reg_size;
433 }
434
435 assert(offset == frame_size_in_bytes, "consistency check");
436
437 // restore link and the flags
438 __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
439 __ mtlr(R31);
440
441 __ ld(R31, frame_size_in_bytes + _abi0(cr), R1_SP);
442 __ mtcr(R31);
443
444 // restore scratch register's value
445 __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);
446
447 // pop the frame
448 __ addi(R1_SP, R1_SP, frame_size_in_bytes);
449
450 BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
451 }
452
453 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
454 int frame_size, int total_args, const VMRegPair *regs,
455 const VMRegPair *regs2) {
456 __ push_frame(frame_size, r_temp);
457 int st_off = frame_size - wordSize;
458 for (int i = 0; i < total_args; i++) {
459 VMReg r_1 = regs[i].first();
460 VMReg r_2 = regs[i].second();
461 if (!r_1->is_valid()) {
462 assert(!r_2->is_valid(), "");
463 continue;
464 }
465 if (r_1->is_Register()) {
466 Register r = r_1->as_Register();
467 __ std(r, st_off, R1_SP);
468 st_off -= wordSize;
469 } else if (r_1->is_FloatRegister()) {
470 FloatRegister f = r_1->as_FloatRegister();
471 __ stfd(f, st_off, R1_SP);
472 st_off -= wordSize;
473 }
474 }
475 if (regs2 != nullptr) {
476 for (int i = 0; i < total_args; i++) {
477 VMReg r_1 = regs2[i].first();
478 VMReg r_2 = regs2[i].second();
479 if (!r_1->is_valid()) {
480 assert(!r_2->is_valid(), "");
481 continue;
482 }
483 if (r_1->is_Register()) {
484 Register r = r_1->as_Register();
485 __ std(r, st_off, R1_SP);
486 st_off -= wordSize;
487 } else if (r_1->is_FloatRegister()) {
488 FloatRegister f = r_1->as_FloatRegister();
489 __ stfd(f, st_off, R1_SP);
490 st_off -= wordSize;
491 }
492 }
493 }
494 }
495
496 void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
497 int total_args, const VMRegPair *regs,
498 const VMRegPair *regs2) {
499 int st_off = frame_size - wordSize;
500 for (int i = 0; i < total_args; i++) {
501 VMReg r_1 = regs[i].first();
502 VMReg r_2 = regs[i].second();
503 if (r_1->is_Register()) {
504 Register r = r_1->as_Register();
505 __ ld(r, st_off, R1_SP);
506 st_off -= wordSize;
507 } else if (r_1->is_FloatRegister()) {
508 FloatRegister f = r_1->as_FloatRegister();
509 __ lfd(f, st_off, R1_SP);
510 st_off -= wordSize;
511 }
512 }
513 if (regs2 != nullptr)
514 for (int i = 0; i < total_args; i++) {
515 VMReg r_1 = regs2[i].first();
516 VMReg r_2 = regs2[i].second();
517 if (r_1->is_Register()) {
518 Register r = r_1->as_Register();
519 __ ld(r, st_off, R1_SP);
520 st_off -= wordSize;
521 } else if (r_1->is_FloatRegister()) {
522 FloatRegister f = r_1->as_FloatRegister();
523 __ lfd(f, st_off, R1_SP);
524 st_off -= wordSize;
525 }
526 }
527 __ pop_frame();
528 }
529
530 // Restore the registers that might be holding a result.
531 void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
532 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
533 sizeof(RegisterSaver::LiveRegType);
534 const int register_save_size = regstosave_num * reg_size; // VS registers not relevant here.
535 const int register_save_offset = frame_size_in_bytes - register_save_size;
536
537 // restore all result registers (ints and floats)
538 int offset = register_save_offset;
539 for (int i = 0; i < regstosave_num; i++) {
540 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
541 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
542 switch (reg_type) {
543 case RegisterSaver::int_reg: {
544 if (as_Register(reg_num)==R3_RET) // int result_reg
545 __ ld(as_Register(reg_num), offset, R1_SP);
546 break;
547 }
548 case RegisterSaver::float_reg: {
549 if (as_FloatRegister(reg_num)==F1_RET) // float result_reg
550 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
551 break;
552 }
553 case RegisterSaver::special_reg: {
554 // Special registers don't hold a result.
555 break;
556 }
557 default:
558 ShouldNotReachHere();
559 }
560 offset += reg_size;
561 }
562
563 assert(offset == frame_size_in_bytes, "consistency check");
564 }
565
566 // Is the vector's size (in bytes) bigger than the size saved by default?
567 bool SharedRuntime::is_wide_vector(int size) {
568 // Note, MaxVectorSize == 8/16 on PPC64.
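// E.g., a 16-byte vector is considered wide because only 8 bytes (one FP register)
// are saved by default.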
569 assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
570 return size > 8;
571 }
572
573 static int reg2slot(VMReg r) {
574 return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
575 }
576
577 static int reg2offset(VMReg r) {
578 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
579 }
580
581 // ---------------------------------------------------------------------------
582 // Read the array of BasicTypes from a signature, and compute where the
583 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
584 // quantities. Values less than VMRegImpl::stack0 are registers, those above
585 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
586 // as framesizes are fixed.
587 // VMRegImpl::stack0 refers to the first slot 0(sp),
588 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
589 // values up to Register::number_of_registers are the 64-bit
590 // integer registers.
591
592 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
593 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
594 // units regardless of build. There is no 32-bit build for PPC64.
595
596 // The Java calling convention is a "shifted" version of the C ABI.
597 // By skipping the first C ABI register we can call non-static JNI methods
598 // with small numbers of arguments without having to shuffle the arguments
599 // at all. Since we control the Java ABI we ought to at least get some
600 // advantage out of it.
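//
// Illustrative example (see java_calling_convention below): for the Java signature
// (int, long, double, Object), sig_bt is { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID,
// T_OBJECT } and the convention assigns int -> R3, long -> R4, double -> F1 and
// Object -> R5; no stack slots are needed, so the returned slot count is 0.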
601
602 const VMReg java_iarg_reg[8] = {
603 R3->as_VMReg(),
604 R4->as_VMReg(),
605 R5->as_VMReg(),
606 R6->as_VMReg(),
607 R7->as_VMReg(),
608 R8->as_VMReg(),
609 R9->as_VMReg(),
610 R10->as_VMReg()
611 };
612
613 const VMReg java_farg_reg[13] = {
614 F1->as_VMReg(),
615 F2->as_VMReg(),
616 F3->as_VMReg(),
617 F4->as_VMReg(),
618 F5->as_VMReg(),
619 F6->as_VMReg(),
620 F7->as_VMReg(),
621 F8->as_VMReg(),
622 F9->as_VMReg(),
623 F10->as_VMReg(),
624 F11->as_VMReg(),
625 F12->as_VMReg(),
626 F13->as_VMReg()
627 };
628
629 const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
630 const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
631
632 STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j);
633 STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j);
634
635 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
636 VMRegPair *regs,
637 int total_args_passed) {
638 // C2C calling convention for compiled-to-compiled calls.
639 // Put 8 ints/longs into registers _AND_ 13 float/doubles into
640 // registers _AND_ put the rest on the stack.
641
642 const int inc_stk_for_intfloat = 1; // 1 slot for ints and floats
643 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
644
645 int i;
646 VMReg reg;
647 int stk = 0;
648 int ireg = 0;
649 int freg = 0;
650
651 // We put the first 8 arguments into registers and the rest on the
652 // stack, float arguments are already in their argument registers
653 // due to c2c calling conventions (see calling_convention).
654 for (int i = 0; i < total_args_passed; ++i) {
655 switch(sig_bt[i]) {
656 case T_BOOLEAN:
657 case T_CHAR:
658 case T_BYTE:
659 case T_SHORT:
660 case T_INT:
661 if (ireg < num_java_iarg_registers) {
662 // Put int/ptr in register
663 reg = java_iarg_reg[ireg];
664 ++ireg;
665 } else {
666 // Put int/ptr on stack.
667 reg = VMRegImpl::stack2reg(stk);
668 stk += inc_stk_for_intfloat;
669 }
670 regs[i].set1(reg);
671 break;
672 case T_LONG:
673 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
674 if (ireg < num_java_iarg_registers) {
675 // Put long in register.
676 reg = java_iarg_reg[ireg];
677 ++ireg;
678 } else {
679 // Put long on stack. They must be aligned to 2 slots.
680 if (stk & 0x1) ++stk;
681 reg = VMRegImpl::stack2reg(stk);
682 stk += inc_stk_for_longdouble;
683 }
684 regs[i].set2(reg);
685 break;
686 case T_OBJECT:
687 case T_ARRAY:
688 case T_ADDRESS:
689 if (ireg < num_java_iarg_registers) {
690 // Put ptr in register.
691 reg = java_iarg_reg[ireg];
692 ++ireg;
693 } else {
694 // Put ptr on stack. Objects must be aligned to 2 slots too,
695 // because "64-bit pointers record oop-ishness on 2 aligned
696 // adjacent registers." (see OopFlow::build_oop_map).
697 if (stk & 0x1) ++stk;
698 reg = VMRegImpl::stack2reg(stk);
699 stk += inc_stk_for_longdouble;
700 }
701 regs[i].set2(reg);
702 break;
703 case T_FLOAT:
704 if (freg < num_java_farg_registers) {
705 // Put float in register.
706 reg = java_farg_reg[freg];
707 ++freg;
708 } else {
709 // Put float on stack.
710 reg = VMRegImpl::stack2reg(stk);
711 stk += inc_stk_for_intfloat;
712 }
713 regs[i].set1(reg);
714 break;
715 case T_DOUBLE:
716 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
717 if (freg < num_java_farg_registers) {
718 // Put double in register.
719 reg = java_farg_reg[freg];
720 ++freg;
721 } else {
722 // Put double on stack. They must be aligned to 2 slots.
723 if (stk & 0x1) ++stk;
724 reg = VMRegImpl::stack2reg(stk);
725 stk += inc_stk_for_longdouble;
726 }
727 regs[i].set2(reg);
728 break;
729 case T_VOID:
730 // Do not count halves.
731 regs[i].set_bad();
732 break;
733 default:
734 ShouldNotReachHere();
735 }
736 }
737 return stk;
738 }
739
740 #if defined(COMPILER1) || defined(COMPILER2)
741 // Calling convention for calling C code.
742 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
743 VMRegPair *regs,
744 VMRegPair *regs2,
745 int total_args_passed) {
746 // Calling conventions for C runtime calls and calls to JNI native methods.
747 //
748 // PPC64 convention: Hoist the first 8 int/ptr/long args into the first 8
749 // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
750 // the first 13 flt/dbl args into the first 13 fp regs, but additionally
751 // copy a flt/dbl to the stack if it is beyond the 8th argument.
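//
// Illustrative example: for the C signature (long, float, long) the first long goes
// to R3, the float to F1 and the second long to R5. R4 stays unused because integer
// argument registers are assigned by overall argument position, whereas float
// registers are assigned by float argument position (see the loop below).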
752
753 const VMReg iarg_reg[8] = {
754 R3->as_VMReg(),
755 R4->as_VMReg(),
756 R5->as_VMReg(),
757 R6->as_VMReg(),
758 R7->as_VMReg(),
759 R8->as_VMReg(),
760 R9->as_VMReg(),
761 R10->as_VMReg()
762 };
763
764 const VMReg farg_reg[13] = {
765 F1->as_VMReg(),
766 F2->as_VMReg(),
767 F3->as_VMReg(),
768 F4->as_VMReg(),
769 F5->as_VMReg(),
770 F6->as_VMReg(),
771 F7->as_VMReg(),
772 F8->as_VMReg(),
773 F9->as_VMReg(),
774 F10->as_VMReg(),
775 F11->as_VMReg(),
776 F12->as_VMReg(),
777 F13->as_VMReg()
778 };
779
780 // Check calling conventions consistency.
781 assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
782 sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
783 "consistency");
784
785 // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
786 // 2 such slots, like 64 bit values do.
787 const int inc_stk_for_intfloat = 2; // 2 slots for ints and floats
788 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
789
790 int i;
791 VMReg reg;
792 // Leave room for C-compatible ABI_REG_ARGS.
793 int stk = (frame::native_abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
794 int arg = 0;
795 int freg = 0;
796
797 // Avoid passing C arguments in the wrong stack slots.
798 #if defined(ABI_ELFv2)
799 assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
800 "passing C arguments in wrong stack slots");
801 #else
802 assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
803 "passing C arguments in wrong stack slots");
804 #endif
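// The asserted values are the C ABI's linkage area (32 bytes for ELFv2, 48 bytes
// for ELFv1/AIX) plus the 64-byte parameter save area for the first 8 doubleword
// arguments, i.e. 96 resp. 112 bytes.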
805 // We fill out regs AND regs2 if an argument must be passed in a
806 // register AND in a stack slot. If regs2 is null in such a
807 // situation, we bail out with a fatal error.
808 for (int i = 0; i < total_args_passed; ++i, ++arg) {
809 // Initialize regs2 to BAD.
810 if (regs2 != nullptr) regs2[i].set_bad();
811
812 switch(sig_bt[i]) {
813
814 //
815 // If arguments 0-7 are integers, they are passed in integer registers.
816 // Argument i is placed in iarg_reg[i].
817 //
818 case T_BOOLEAN:
819 case T_CHAR:
820 case T_BYTE:
821 case T_SHORT:
822 case T_INT:
823 // We must cast ints to longs and use full 64 bit stack slots
824 // here. Thus fall through, handle as long.
825 case T_LONG:
826 case T_OBJECT:
827 case T_ARRAY:
828 case T_ADDRESS:
829 case T_METADATA:
830 // Oops are already boxed if required (JNI).
831 if (arg < Argument::n_int_register_parameters_c) {
832 reg = iarg_reg[arg];
833 } else {
834 reg = VMRegImpl::stack2reg(stk);
835 stk += inc_stk_for_longdouble;
836 }
837 regs[i].set2(reg);
838 break;
839
840 //
841 // Floats are treated differently from int regs: The first 13 float arguments
842 // are passed in registers (not the float args among the first 13 args).
843 // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
844 // in farg_reg[j] if argument i is the j-th float argument of this call.
845 //
846 case T_FLOAT:
847 #if defined(LINUX)
848 // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
849 // in the least significant word of an argument slot.
850 #if defined(VM_LITTLE_ENDIAN)
851 #define FLOAT_WORD_OFFSET_IN_SLOT 0
852 #else
853 #define FLOAT_WORD_OFFSET_IN_SLOT 1
854 #endif
855 #elif defined(AIX)
856 // Although AIX runs on big endian CPU, float is in the most
857 // significant word of an argument slot.
858 #define FLOAT_WORD_OFFSET_IN_SLOT 0
859 #else
860 #error "unknown OS"
861 #endif
862 if (freg < Argument::n_float_register_parameters_c) {
863 // Put float in register ...
864 reg = farg_reg[freg];
865 ++freg;
866
867 // An argument beyond the 8th is placed on the stack even if it's
868 // also passed in a register (if it's a float arg). AIX disassembly
869 // shows that xlC places these float args on the stack AND in
870 // a register. This is not documented, but we follow this
871 // convention, too.
872 if (arg >= Argument::n_regs_not_on_stack_c) {
873 // ... and on the stack.
874 guarantee(regs2 != nullptr, "must pass float in register and stack slot");
875 VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
876 regs2[i].set1(reg2);
877 stk += inc_stk_for_intfloat;
878 }
879
880 } else {
881 // Put float on stack.
882 reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
883 stk += inc_stk_for_intfloat;
884 }
885 regs[i].set1(reg);
886 break;
887 case T_DOUBLE:
888 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
889 if (freg < Argument::n_float_register_parameters_c) {
890 // Put double in register ...
891 reg = farg_reg[freg];
892 ++freg;
893
894 // An argument beyond the 8th is placed on the stack even if it's
895 // also passed in a register (if it's a double arg). AIX disassembly
896 // shows that xlC places these double args on the stack AND in
897 // a register. This is not documented, but we follow this
898 // convention, too.
899 if (arg >= Argument::n_regs_not_on_stack_c) {
900 // ... and on the stack.
901 guarantee(regs2 != nullptr, "must pass float in register and stack slot");
902 VMReg reg2 = VMRegImpl::stack2reg(stk);
903 regs2[i].set2(reg2);
904 stk += inc_stk_for_longdouble;
905 }
906 } else {
907 // Put double on stack.
908 reg = VMRegImpl::stack2reg(stk);
909 stk += inc_stk_for_longdouble;
910 }
911 regs[i].set2(reg);
912 break;
913
914 case T_VOID:
915 // Do not count halves.
916 regs[i].set_bad();
917 --arg;
918 break;
919 default:
920 ShouldNotReachHere();
921 }
922 }
923
924 return align_up(stk, 2);
925 }
926 #endif // COMPILER1 || COMPILER2
927
928 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
929 uint num_bits,
930 uint total_args_passed) {
931 Unimplemented();
932 return 0;
933 }
934
935 static address gen_c2i_adapter(MacroAssembler *masm,
936 int total_args_passed,
937 int comp_args_on_stack,
938 const BasicType *sig_bt,
939 const VMRegPair *regs,
940 Label& call_interpreter,
941 const Register& ientry) {
942
943 address c2i_entrypoint;
944
945 const Register sender_SP = R21_sender_SP; // == R21_tmp1
946 const Register code = R22_tmp2;
947 //const Register ientry = R23_tmp3;
948 const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
949 const int num_value_regs = sizeof(value_regs) / sizeof(Register);
950 int value_regs_index = 0;
951
952 const Register return_pc = R27_tmp7;
953 const Register tmp = R28_tmp8;
954
955 assert_different_registers(sender_SP, code, ientry, return_pc, tmp);
956
957 // Adapter needs TOP_IJAVA_FRAME_ABI.
958 const int adapter_size = frame::top_ijava_frame_abi_size +
959 align_up(total_args_passed * wordSize, frame::alignment_in_bytes);
960
961 // regular (verified) c2i entry point
962 c2i_entrypoint = __ pc();
963
964 // Does compiled code exist? If yes, patch the caller's callsite.
965 __ ld(code, method_(code));
966 __ cmpdi(CCR0, code, 0);
967 __ ld(ientry, method_(interpreter_entry)); // preloaded
968 __ beq(CCR0, call_interpreter);
969
970
971 // Patch the caller's callsite: method_(code) was not null, which means that
972 // compiled code exists.
973 __ mflr(return_pc);
974 __ std(return_pc, _abi0(lr), R1_SP);
975 RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);
976
977 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);
978
979 RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
980 __ ld(return_pc, _abi0(lr), R1_SP);
981 __ ld(ientry, method_(interpreter_entry)); // preloaded
982 __ mtlr(return_pc);
983
984
985 // Call the interpreter.
986 __ BIND(call_interpreter);
987 __ mtctr(ientry);
988
989 // Get a copy of the current SP for loading caller's arguments.
990 __ mr(sender_SP, R1_SP);
991
992 // Add space for the adapter.
993 __ resize_frame(-adapter_size, R12_scratch2);
994
995 int st_off = adapter_size - wordSize;
996
997 // Write the args into the outgoing interpreter space.
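// Arguments are laid out top-down: st_off starts just below the top of the adapter
// area and decreases. Longs and doubles occupy two interpreter slots; the data goes
// into the lower slot while (in debug builds) the unused upper slot is zeroed.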
998 for (int i = 0; i < total_args_passed; i++) {
999 VMReg r_1 = regs[i].first();
1000 VMReg r_2 = regs[i].second();
1001 if (!r_1->is_valid()) {
1002 assert(!r_2->is_valid(), "");
1003 continue;
1004 }
1005 if (r_1->is_stack()) {
1006 Register tmp_reg = value_regs[value_regs_index];
1007 value_regs_index = (value_regs_index + 1) % num_value_regs;
1008 // The calling convention produces OptoRegs that ignore the out
1009 // preserve area (JIT's ABI). We must account for it here.
1010 int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
1011 if (!r_2->is_valid()) {
1012 __ lwz(tmp_reg, ld_off, sender_SP);
1013 } else {
1014 __ ld(tmp_reg, ld_off, sender_SP);
1015 }
1016 // Pretend stack targets were loaded into tmp_reg.
1017 r_1 = tmp_reg->as_VMReg();
1018 }
1019
1020 if (r_1->is_Register()) {
1021 Register r = r_1->as_Register();
1022 if (!r_2->is_valid()) {
1023 __ stw(r, st_off, R1_SP);
1024 st_off-=wordSize;
1025 } else {
1026 // Longs are given 2 64-bit slots in the interpreter, but the
1027 // data is passed in only 1 slot.
1028 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
1029 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
1030 st_off-=wordSize;
1031 }
1032 __ std(r, st_off, R1_SP);
1033 st_off-=wordSize;
1034 }
1035 } else {
1036 assert(r_1->is_FloatRegister(), "");
1037 FloatRegister f = r_1->as_FloatRegister();
1038 if (!r_2->is_valid()) {
1039 __ stfs(f, st_off, R1_SP);
1040 st_off-=wordSize;
1041 } else {
1042 // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
1043 // data is passed in only 1 slot.
1044 // One of these should get known junk...
1045 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
1046 st_off-=wordSize;
1047 __ stfd(f, st_off, R1_SP);
1048 st_off-=wordSize;
1049 }
1050 }
1051 }
1052
1053 // Jump to the interpreter just as if interpreter was doing it.
1054
1055 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
1056
1057 // load TOS
1058 __ addi(R15_esp, R1_SP, st_off);
1059
1060 // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
1061 assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
1062 __ bctr();
1063
1064 return c2i_entrypoint;
1065 }
1066
1067 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1068 int total_args_passed,
1069 int comp_args_on_stack,
1070 const BasicType *sig_bt,
1071 const VMRegPair *regs) {
1072
1073 // Load method's entry-point from method.
1074 __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
1075 __ mtctr(R12_scratch2);
1076
1077 // We will only enter here from an interpreted frame and never from after
1078 // passing thru a c2i. Azul allowed this but we do not. If we lose the
1079 // race and use a c2i we will remain interpreted for the race loser(s).
1080 // This removes all sorts of headaches on the x86 side and also eliminates
1081 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
1082
1083 // Note: r13 contains the senderSP on entry. We must preserve it since
1084 // we may do a i2c -> c2i transition if we lose a race where compiled
1085 // code goes non-entrant while we get args ready.
1086 // In addition we use R15_esp to locate all the interpreter args as
1087 // we must align the stack to 16 bytes on an i2c entry else we
1088 // lose the alignment we expect in all compiled code and register
1089 // save code can crash when it encounters an improperly
1090 // aligned stack pointer.
1091
1092 const Register ld_ptr = R15_esp;
1093 const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
1094 const int num_value_regs = sizeof(value_regs) / sizeof(Register);
1095 int value_regs_index = 0;
1096
1097 int ld_offset = total_args_passed*wordSize;
1098
1099 // Cut-out for having no stack args. Since up to 8 int/oop args are passed
1100 // in registers, we will occasionally have no stack args.
1101 int comp_words_on_stack = 0;
1102 if (comp_args_on_stack) {
1103 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
1104 // registers are below. By subtracting stack0, we either get a negative
1105 // number (all values in registers) or the maximum stack slot accessed.
1106
1107 // Convert 4-byte c2 stack slots to words.
1108 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1109 // Round up to minimum stack alignment, in wordSize.
1110 comp_words_on_stack = align_up(comp_words_on_stack, 2);
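// Example: comp_args_on_stack == 3 gives align_up(12, 8) = 16 bytes = 2 words,
// already 2-word aligned, so the frame below is extended by 16 bytes.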
1111 __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
1112 }
1113
1114 // Now generate the shuffle code. Pick up all register args and move the
1115 // rest through the value_regs temporaries (R22-R26).
1116 BLOCK_COMMENT("Shuffle arguments");
1117 for (int i = 0; i < total_args_passed; i++) {
1118 if (sig_bt[i] == T_VOID) {
1119 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1120 continue;
1121 }
1122
1123 // Pick up 0, 1 or 2 words from ld_ptr.
1124 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1125 "scrambled load targets?");
1126 VMReg r_1 = regs[i].first();
1127 VMReg r_2 = regs[i].second();
1128 if (!r_1->is_valid()) {
1129 assert(!r_2->is_valid(), "");
1130 continue;
1131 }
1132 if (r_1->is_FloatRegister()) {
1133 if (!r_2->is_valid()) {
1134 __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
1135 ld_offset-=wordSize;
1136 } else {
1137 // Skip the unused interpreter slot.
1138 __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
1139 ld_offset-=2*wordSize;
1140 }
1141 } else {
1142 Register r;
1143 if (r_1->is_stack()) {
1144 // Must do a memory to memory move thru "value".
1145 r = value_regs[value_regs_index];
1146 value_regs_index = (value_regs_index + 1) % num_value_regs;
1147 } else {
1148 r = r_1->as_Register();
1149 }
1150 if (!r_2->is_valid()) {
1151 // Not sure we need to do this but it shouldn't hurt.
1152 if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
1153 __ ld(r, ld_offset, ld_ptr);
1154 ld_offset-=wordSize;
1155 } else {
1156 __ lwz(r, ld_offset, ld_ptr);
1157 ld_offset-=wordSize;
1158 }
1159 } else {
1160 // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
1161 // data is passed in only 1 slot.
1162 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
1163 ld_offset-=wordSize;
1164 }
1165 __ ld(r, ld_offset, ld_ptr);
1166 ld_offset-=wordSize;
1167 }
1168
1169 if (r_1->is_stack()) {
1170 // Now store value where the compiler expects it
1171 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;
1172
1173 if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT ||sig_bt[i] == T_BOOLEAN ||
1174 sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
1175 __ stw(r, st_off, R1_SP);
1176 } else {
1177 __ std(r, st_off, R1_SP);
1178 }
1179 }
1180 }
1181 }
1182
1183 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1184
1185 BLOCK_COMMENT("Store method");
1186 // Store method into thread->callee_target.
1187 // We might end up in handle_wrong_method if the callee is
1188 // deoptimized as we race thru here. If that happens we don't want
1189 // to take a safepoint because the caller frame will look
1190 // interpreted and arguments are now "compiled" so it is much better
1191 // to make this transition invisible to the stack walking
1192 // code. Unfortunately if we try and find the callee by normal means
1193 // a safepoint is possible. So we stash the desired callee in the
1194 // thread and the VM will find it there should this case occur.
1195 __ std(R19_method, thread_(callee_target));
1196
1197 // Jump to the compiled code just as if compiled code was doing it.
1198 __ bctr();
1199 }
1200
1201 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1202 int total_args_passed,
1203 int comp_args_on_stack,
1204 const BasicType *sig_bt,
1205 const VMRegPair *regs,
1206 AdapterFingerPrint* fingerprint) {
1207 address i2c_entry;
1208 address c2i_unverified_entry;
1209 address c2i_entry;
1210
1211
1212 // entry: i2c
1213
1214 __ align(CodeEntryAlignment);
1215 i2c_entry = __ pc();
1216 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
1217
1218
1219 // entry: c2i unverified
1220
1221 __ align(CodeEntryAlignment);
1222 BLOCK_COMMENT("c2i unverified entry");
1223 c2i_unverified_entry = __ pc();
1224
1225 // The inline cache register contains a CompiledICHolder.
1226 const Register ic = R19_method;
1227 const Register ic_klass = R11_scratch1;
1228 const Register receiver_klass = R12_scratch2;
1229 const Register code = R21_tmp1;
1230 const Register ientry = R23_tmp3;
1231
1232 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
1233 assert(R11_scratch1 == R11, "need prologue scratch register");
1234
1235 Label call_interpreter;
1236
1237 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
1238 "klass offset should reach into any page");
1239 // Check for null argument if we don't have implicit null checks.
1240 if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
1241 if (TrapBasedNullChecks) {
1242 __ trap_null_check(R3_ARG1);
1243 } else {
1244 Label valid;
1245 __ cmpdi(CCR0, R3_ARG1, 0);
1246 __ bne_predict_taken(CCR0, valid);
1247 // We have a null argument, branch to ic_miss_stub.
1248 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
1249 relocInfo::runtime_call_type);
1250 __ BIND(valid);
1251 }
1252 }
1253 // Assume argument is not null, load klass from receiver.
1254 __ load_klass(receiver_klass, R3_ARG1);
1255
1256 __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
1257
1258 if (TrapBasedICMissChecks) {
1259 __ trap_ic_miss_check(receiver_klass, ic_klass);
1260 } else {
1261 Label valid;
1262 __ cmpd(CCR0, receiver_klass, ic_klass);
1263 __ beq_predict_taken(CCR0, valid);
1264 // We have an unexpected klass, branch to ic_miss_stub.
1265 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
1266 relocInfo::runtime_call_type);
1267 __ BIND(valid);
1268 }
1269
1270 // Argument is valid and klass is as expected, continue.
1271
1272 // Extract method from inline cache, verified entry point needs it.
1273 __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
1274 assert(R19_method == ic, "the inline cache register is dead here");
1275
1276 __ ld(code, method_(code));
1277 __ cmpdi(CCR0, code, 0);
1278 __ ld(ientry, method_(interpreter_entry)); // preloaded
1279 __ beq_predict_taken(CCR0, call_interpreter);
1280
1281 // Branch to ic_miss_stub.
1282 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1283
1284 // entry: c2i
1285
1286 c2i_entry = __ pc();
1287
1288 // Class initialization barrier for static methods
1289 address c2i_no_clinit_check_entry = nullptr;
1290 if (VM_Version::supports_fast_class_init_checks()) {
1291 Label L_skip_barrier;
1292
1293 { // Bypass the barrier for non-static methods
1294 __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
1295 __ andi_(R0, R0, JVM_ACC_STATIC);
1296 __ beq(CCR0, L_skip_barrier); // non-static
1297 }
1298
1299 Register klass = R11_scratch1;
1300 __ load_method_holder(klass, R19_method);
1301 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
1302
1303 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
1304 __ mtctr(klass);
1305 __ bctr();
1306
1307 __ bind(L_skip_barrier);
1308 c2i_no_clinit_check_entry = __ pc();
1309 }
1310
1311 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1312 bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
1313
1314 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
1315
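// The resulting adapter exposes up to four entry points: i2c_entry (interpreted ->
// compiled), c2i_unverified_entry (inline cache check first), c2i_entry (including
// the class initialization barrier for static methods where supported) and
// c2i_no_clinit_check_entry (the same path without that barrier).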
1316 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
1317 c2i_no_clinit_check_entry);
1318 }
1319
1320 // An oop arg. Must pass a handle, not the oop itself.
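// E.g., an oop arriving in R3_ARG1 is spilled into the frame's oop handle area and
// the outgoing C argument becomes the address of that stack slot, or a null pointer
// if the oop itself is null (as the JNI convention requires).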
1321 static void object_move(MacroAssembler* masm,
1322 int frame_size_in_slots,
1323 OopMap* oop_map, int oop_handle_offset,
1324 bool is_receiver, int* receiver_offset,
1325 VMRegPair src, VMRegPair dst,
1326 Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1327 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1328 "receiver has already been moved");
1329
1330 // We must pass a handle. First figure out the location we use as a handle.
1331
1332 if (src.first()->is_stack()) {
1333 // stack to stack or reg
1334
1335 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1336 Label skip;
1337 const int oop_slot_in_callers_frame = reg2slot(src.first());
1338
1339 guarantee(!is_receiver, "expecting receiver in register");
1340 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));
1341
1342 __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
1343 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
1344 __ cmpdi(CCR0, r_temp_2, 0);
1345 __ bne(CCR0, skip);
1346 // Use a null handle if oop is null.
1347 __ li(r_handle, 0);
1348 __ bind(skip);
1349
1350 if (dst.first()->is_stack()) {
1351 // stack to stack
1352 __ std(r_handle, reg2offset(dst.first()), R1_SP);
1353 } else {
1354 // stack to reg
1355 // Nothing to do, r_handle is already the dst register.
1356 }
1357 } else {
1358 // reg to stack or reg
1359 const Register r_oop = src.first()->as_Register();
1360 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1361 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
1362 + oop_handle_offset; // in slots
1363 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
1364 Label skip;
1365
1366 if (is_receiver) {
1367 *receiver_offset = oop_offset;
1368 }
1369 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));
1370
1371 __ std( r_oop, oop_offset, R1_SP);
1372 __ addi(r_handle, R1_SP, oop_offset);
1373
1374 __ cmpdi(CCR0, r_oop, 0);
1375 __ bne(CCR0, skip);
1376 // Use a null handle if oop is null.
1377 __ li(r_handle, 0);
1378 __ bind(skip);
1379
1380 if (dst.first()->is_stack()) {
1381 // reg to stack
1382 __ std(r_handle, reg2offset(dst.first()), R1_SP);
1383 } else {
1384 // reg to reg
1385 // Nothing to do, r_handle is already the dst register.
1386 }
1387 }
1388 }
1389
1390 static void int_move(MacroAssembler* masm,
1391 VMRegPair src, VMRegPair dst,
1392 Register r_caller_sp, Register r_temp) {
1393 assert(src.first()->is_valid(), "incoming must be int");
1394 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
1395
1396 if (src.first()->is_stack()) {
1397 if (dst.first()->is_stack()) {
1398 // stack to stack
1399 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
1400 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1401 } else {
1402 // stack to reg
1403 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
1404 }
1405 } else if (dst.first()->is_stack()) {
1406 // reg to stack
1407 __ extsw(r_temp, src.first()->as_Register());
1408 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1409 } else {
1410 // reg to reg
1411 __ extsw(dst.first()->as_Register(), src.first()->as_Register());
1412 }
1413 }
1414
1415 static void long_move(MacroAssembler* masm,
1416 VMRegPair src, VMRegPair dst,
1417 Register r_caller_sp, Register r_temp) {
1418 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
1419 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
1420
1421 if (src.first()->is_stack()) {
1422 if (dst.first()->is_stack()) {
1423 // stack to stack
1424 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
1425 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1426 } else {
1427 // stack to reg
1428 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
1429 }
1430 } else if (dst.first()->is_stack()) {
1431 // reg to stack
1432 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
1433 } else {
1434 // reg to reg
1435 if (dst.first()->as_Register() != src.first()->as_Register())
1436 __ mr(dst.first()->as_Register(), src.first()->as_Register());
1437 }
1438 }
1439
1440 static void float_move(MacroAssembler* masm,
1441 VMRegPair src, VMRegPair dst,
1442 Register r_caller_sp, Register r_temp) {
1443 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
1444 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");
1445
1446 if (src.first()->is_stack()) {
1447 if (dst.first()->is_stack()) {
1448 // stack to stack
1449 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
1450 __ stw(r_temp, reg2offset(dst.first()), R1_SP);
1451 } else {
1452 // stack to reg
1453 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
1454 }
1455 } else if (dst.first()->is_stack()) {
1456 // reg to stack
1457 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
1458 } else {
1459 // reg to reg
1460 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
1461 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1462 }
1463 }
1464
1465 static void double_move(MacroAssembler* masm,
1466 VMRegPair src, VMRegPair dst,
1467 Register r_caller_sp, Register r_temp) {
1468 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
1469 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");
1470
1471 if (src.first()->is_stack()) {
1472 if (dst.first()->is_stack()) {
1473 // stack to stack
1474 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
1475 __ std(r_temp, reg2offset(dst.first()), R1_SP);
1476 } else {
1477 // stack to reg
1478 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
1479 }
1480 } else if (dst.first()->is_stack()) {
1481 // reg to stack
1482 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
1483 } else {
1484 // reg to reg
1485 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
1486 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1487 }
1488 }
1489
1490 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1491 switch (ret_type) {
1492 case T_BOOLEAN:
1493 case T_CHAR:
1494 case T_BYTE:
1495 case T_SHORT:
1496 case T_INT:
1497 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1498 break;
1499 case T_ARRAY:
1500 case T_OBJECT:
1501 case T_LONG:
1502 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1503 break;
1504 case T_FLOAT:
1505 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1506 break;
1507 case T_DOUBLE:
1508 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1509 break;
1510 case T_VOID:
1511 break;
1512 default:
1513 ShouldNotReachHere();
1514 break;
1515 }
1516 }
1517
1518 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1519 switch (ret_type) {
1520 case T_BOOLEAN:
1521 case T_CHAR:
1522 case T_BYTE:
1523 case T_SHORT:
1524 case T_INT:
1525 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1526 break;
1527 case T_ARRAY:
1528 case T_OBJECT:
1529 case T_LONG:
1530 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1531 break;
1532 case T_FLOAT:
1533 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1534 break;
1535 case T_DOUBLE:
1536 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
1537 break;
1538 case T_VOID:
1539 break;
1540 default:
1541 ShouldNotReachHere();
1542 break;
1543 }
1544 }
1545
1546 static void verify_oop_args(MacroAssembler* masm,
1547 const methodHandle& method,
1548 const BasicType* sig_bt,
1549 const VMRegPair* regs) {
1550 Register temp_reg = R19_method; // not part of any compiled calling seq
1551 if (VerifyOops) {
1552 for (int i = 0; i < method->size_of_parameters(); i++) {
1553 if (is_reference_type(sig_bt[i])) {
1554 VMReg r = regs[i].first();
1555 assert(r->is_valid(), "bad oop arg");
1556 if (r->is_stack()) {
1557 __ ld(temp_reg, reg2offset(r), R1_SP);
1558 __ verify_oop(temp_reg, FILE_AND_LINE);
1559 } else {
1560 __ verify_oop(r->as_Register(), FILE_AND_LINE);
1561 }
1562 }
1563 }
1564 }
1565 }
1566
1567 static void gen_special_dispatch(MacroAssembler* masm,
1568 const methodHandle& method,
1569 const BasicType* sig_bt,
1570 const VMRegPair* regs) {
1571 verify_oop_args(masm, method, sig_bt, regs);
1572 vmIntrinsics::ID iid = method->intrinsic_id();
1573
1574 // Figure out the receiver and the trailing MemberName/NativeEntryPoint argument.
1575 bool has_receiver = false;
1576 Register receiver_reg = noreg;
1577 int member_arg_pos = -1;
1578 Register member_reg = noreg;
1579 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1580 if (ref_kind != 0) {
1581 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1582 member_reg = R19_method; // known to be free at this point
1583 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1584 } else if (iid == vmIntrinsics::_invokeBasic) {
1585 has_receiver = true;
1586 } else if (iid == vmIntrinsics::_linkToNative) {
1587 member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
1588 member_reg = R19_method; // known to be free at this point
1589 } else {
1590 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1591 }
1592
1593 if (member_reg != noreg) {
1594 // Load the member_arg into register, if necessary.
1595 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1596 VMReg r = regs[member_arg_pos].first();
1597 if (r->is_stack()) {
1598 __ ld(member_reg, reg2offset(r), R1_SP);
1599 } else {
1600 // no data motion is needed
1601 member_reg = r->as_Register();
1602 }
1603 }
1604
1605 if (has_receiver) {
1606 // Make sure the receiver is loaded into a register.
1607 assert(method->size_of_parameters() > 0, "oob");
1608 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1609 VMReg r = regs[0].first();
1610 assert(r->is_valid(), "bad receiver arg");
1611 if (r->is_stack()) {
1612 // Porting note: This assumes that compiled calling conventions always
1613 // pass the receiver oop in a register. If this is not true on some
1614 // platform, pick a temp and load the receiver from stack.
1615 fatal("receiver always in a register");
1616 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point?
1617 __ ld(receiver_reg, reg2offset(r), R1_SP);
1618 } else {
1619 // no data motion is needed
1620 receiver_reg = r->as_Register();
1621 }
1622 }
1623
1624 // Figure out which address we are really jumping to:
1625 MethodHandles::generate_method_handle_dispatch(masm, iid,
1626 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1627 }
1628
1629 //---------------------------- continuation_enter_setup ---------------------------
1630 //
1631 // Frame setup.
1632 //
1633 // Arguments:
1634 // None.
1635 //
1636 // Results:
1637 // R1_SP: pointer to blank ContinuationEntry in the pushed frame.
1638 //
1639 // Kills:
1640 // R0, R20
1641 //
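// Illustrative sketch of the stack right after setup (lower addresses towards the top):
//   R1_SP -> [ blank ContinuationEntry (ContinuationEntry::size() bytes);
//              its parent slot links to the previous JavaThread::_cont_entry ]
//            [ caller's frame; its lr slot now holds the saved return pc (R20) ]
// JavaThread::_cont_entry is updated to point to the new entry (the new R1_SP).
//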
1642 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) {
1643 assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1644 assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
1645 assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1646
1647 const int frame_size_in_bytes = (int)ContinuationEntry::size();
1648 assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error");
1649
1650 framesize_words = frame_size_in_bytes / wordSize;
1651
1652 DEBUG_ONLY(__ block_comment("setup {"));
1653 // Save return pc and push entry frame
1654 const Register return_pc = R20;
1655 __ mflr(return_pc);
1656 __ std(return_pc, _abi0(lr), R1_SP); // SP->lr = return_pc
1657 __ push_frame(frame_size_in_bytes, R0); // SP -= frame_size_in_bytes
1658
1659 OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
1660
1661 __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread);
1662 __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1663 __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP);
1664 DEBUG_ONLY(__ block_comment("} setup"));
1665
1666 return map;
1667 }
1668
1669 //---------------------------- fill_continuation_entry ---------------------------
1670 //
1671 // Initialize the new ContinuationEntry.
1672 //
1673 // Arguments:
1674 // R1_SP: pointer to blank Continuation entry
1675 // reg_cont_obj: pointer to the continuation
1676 // reg_flags: flags
1677 //
1678 // Results:
1679 // R1_SP: pointer to filled out ContinuationEntry
1680 //
1681 // Kills:
1682 // R8_ARG6, R9_ARG7, R10_ARG8
1683 //
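// Illustrative summary of the entry's state on return (assuming the current
// ContinuationEntry layout): cookie = cookie_value() (debug builds only),
// cont = reg_cont_obj, flags = reg_flags, chunk/argsize/pin_count = 0,
// parent_cont_fastpath and parent_held_monitor_count hold the values previously
// kept in the JavaThread, whose cont_fastpath and held_monitor_count are zeroed.
//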
1684 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) {
1685 assert_different_registers(reg_cont_obj, reg_flags);
1686 Register zero = R8_ARG6;
1687 Register tmp2 = R9_ARG7;
1688 Register tmp3 = R10_ARG8;
1689
1690 DEBUG_ONLY(__ block_comment("fill {"));
1691 #ifdef ASSERT
1692 __ load_const_optimized(tmp2, ContinuationEntry::cookie_value());
1693 __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP);
1694 #endif //ASSERT
1695
1696 __ li(zero, 0);
1697 __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP);
1698 __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
1699 __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP);
1700 __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP);
1701 __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP);
1702
1703 __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread);
1704 __ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
1705 __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
1706 __ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
1707
1708 __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread);
1709 __ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
1710 DEBUG_ONLY(__ block_comment("} fill"));
1711 }
1712
1713 //---------------------------- continuation_enter_cleanup ---------------------------
1714 //
1715 // Copy corresponding attributes from the top ContinuationEntry to the JavaThread
1716 // before deleting it.
1717 //
1718 // Arguments:
1719 // R1_SP: pointer to the ContinuationEntry
1720 //
1721 // Results:
1722 // None.
1723 //
1724 // Kills:
1725 // R8_ARG6, R9_ARG7, R10_ARG8
1726 //
1727 static void continuation_enter_cleanup(MacroAssembler* masm) {
1728 Register tmp1 = R8_ARG6;
1729 Register tmp2 = R9_ARG7;
1730 Register tmp3 = R10_ARG8;
1731
1732 #ifdef ASSERT
1733 __ block_comment("clean {");
1734 __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
1735 __ cmpd(CCR0, R1_SP, tmp1);
1736 __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
1737 #endif
1738
1739 __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
1740 __ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
1741 __ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP);
1742 __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);
1743 __ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
1744 __ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread);
1745 DEBUG_ONLY(__ block_comment("} clean"));
1746 }
1747
1748 static void check_continuation_enter_argument(VMReg actual_vmreg,
1749 Register expected_reg,
1750 const char* name) {
1751 assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name);
1752 assert(actual_vmreg->as_Register() == expected_reg,
1753 "%s is in unexpected register: %s instead of %s",
1754 name, actual_vmreg->as_Register()->name(), expected_reg->name());
1755 }
1756
1757 static void gen_continuation_enter(MacroAssembler* masm,
1758 const VMRegPair* regs,
1759 int& exception_offset,
1760 OopMapSet* oop_maps,
1761 int& frame_complete,
1762 int& framesize_words,
1763 int& interpreted_entry_offset,
1764 int& compiled_entry_offset) {
1765
1766 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1767 int pos_cont_obj = 0;
1768 int pos_is_cont = 1;
1769 int pos_is_virtual = 2;
1770
1771 // The platform-specific calling convention may present the arguments in various registers.
1772 // To simplify the rest of the code, we expect the arguments to reside in these known
1773 // registers, and we additionally check the placement here in case the calling convention
1774 // ever changes.
1775 Register reg_cont_obj = R3_ARG1;
1776 Register reg_is_cont = R4_ARG2;
1777 Register reg_is_virtual = R5_ARG3;
1778
1779 check_continuation_enter_argument(regs[pos_cont_obj].first(), reg_cont_obj, "Continuation object");
1780 check_continuation_enter_argument(regs[pos_is_cont].first(), reg_is_cont, "isContinue");
1781 check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread");
1782
1783 address resolve_static_call = SharedRuntime::get_resolve_static_call_stub();
1784
1785 address start = __ pc();
1786
1787 Label L_thaw, L_exit;
1788
1789 // i2i entry used at interp_only_mode only
1790 interpreted_entry_offset = __ pc() - start;
1791 {
1792 #ifdef ASSERT
1793 Label is_interp_only;
1794 __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
1795 __ cmpwi(CCR0, R0, 0);
1796 __ bne(CCR0, is_interp_only);
1797 __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1798 __ bind(is_interp_only);
1799 #endif
1800
1801 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1802 __ ld(reg_cont_obj, Interpreter::stackElementSize*3, R15_esp);
1803 __ lwz(reg_is_cont, Interpreter::stackElementSize*2, R15_esp);
1804 __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp);
1805
1806 __ push_cont_fastpath();
1807
1808 OopMap* map = continuation_enter_setup(masm, framesize_words);
1809
1810 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe.
1811 // That's acceptable: at worst we miss an async sample, and we're in interp_only_mode anyway.
1812
1813 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1814
1815 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1816 __ cmpwi(CCR0, reg_is_cont, 0);
1817 __ bne(CCR0, L_thaw);
1818
1819 // --- call Continuation.enter(Continuation c, boolean isContinue)
1820
1821 // Emit compiled static call. The call will be always resolved to the c2i
1822 // entry of Continuation.enter(Continuation c, boolean isContinue).
1823 // There are special cases in SharedRuntime::resolve_static_call_C() and
1824 // SharedRuntime::resolve_sub_helper_internal() to achieve this.
1825 // See also the corresponding call below.
1826 address c2i_call_pc = __ pc();
1827 int start_offset = __ offset();
1828 // Put the entry point as a constant into the constant pool.
1829 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
1830 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
1831 guarantee(entry_point_toc_addr != nullptr, "const section overflow");
1832
1833 // Emit the trampoline stub which will be related to the branch-and-link below.
1834 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
1835 guarantee(stub != nullptr, "no space for trampoline stub");
1836
1837 __ relocate(relocInfo::static_call_type);
1838 // Note: At this point we do not have the address of the trampoline
1839 // stub, and the entry point might be too far away for bl, so __ pc()
1840 // serves as dummy and the bl will be patched later.
1841 __ bl(__ pc());
1842 oop_maps->add_gc_map(__ pc() - start, map);
1843 __ post_call_nop();
1844
1845 __ b(L_exit);
1846
1847 // static stub for the call above
1848 CodeBuffer* cbuf = masm->code_section()->outer();
1849 stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
1850 guarantee(stub != nullptr, "no space for static stub");
1851 }
1852
1853 // compiled entry
1854 __ align(CodeEntryAlignment);
1855 compiled_entry_offset = __ pc() - start;
1856
1857 OopMap* map = continuation_enter_setup(masm, framesize_words);
1858
1859 // Frame is now completed as far as size and linkage.
1860 frame_complete = __ pc() - start;
1861
1862 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1863
1864 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1865 __ cmpwi(CCR0, reg_is_cont, 0);
1866 __ bne(CCR0, L_thaw);
1867
1868 // --- call Continuation.enter(Continuation c, boolean isContinue)
1869
1870 // Emit compiled static call
1871 // The call needs to be resolved. There's a special case for this in
1872 // SharedRuntime::find_callee_info_helper() which calls
1873 // LinkResolver::resolve_continuation_enter() which resolves the call to
1874 // Continuation.enter(Continuation c, boolean isContinue).
1875 address call_pc = __ pc();
1876 int start_offset = __ offset();
1877 // Put the entry point as a constant into the constant pool.
1878 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
1879 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
1880 guarantee(entry_point_toc_addr != nullptr, "const section overflow");
1881
1882 // Emit the trampoline stub which will be related to the branch-and-link below.
1883 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
1884 guarantee(stub != nullptr, "no space for trampoline stub");
1885
1886 __ relocate(relocInfo::static_call_type);
1887 // Note: At this point we do not have the address of the trampoline
1888 // stub, and the entry point might be too far away for bl, so __ pc()
1889 // serves as dummy and the bl will be patched later.
1890 __ bl(__ pc());
1891 oop_maps->add_gc_map(__ pc() - start, map);
1892 __ post_call_nop();
1893
1894 __ b(L_exit);
1895
1896 // --- Thawing path
1897
1898 __ bind(L_thaw);
1899 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw()));
1900 __ mtctr(R0);
1901 __ bctrl();
1902 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1903 ContinuationEntry::_return_pc_offset = __ pc() - start;
1904 __ post_call_nop();
1905
1906 // --- Normal exit (resolve/thawing)
1907
1908 __ bind(L_exit);
1909 continuation_enter_cleanup(masm);
1910
1911 // Pop frame and return
1912 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP));
1913 __ addi(R1_SP, R1_SP, framesize_words*wordSize);
1914 DEBUG_ONLY(__ cmpd(CCR0, R0, R1_SP));
1915 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size");
1916 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1917 __ mtlr(R0);
1918 __ blr();
1919
1920 // --- Exception handling path
1921
1922 exception_offset = __ pc() - start;
1923
1924 continuation_enter_cleanup(masm);
1925 Register ex_pc = R17_tos; // nonvolatile register
1926 Register ex_oop = R15_esp; // nonvolatile register
1927 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc
1928 __ ld(ex_pc, _abi0(lr), ex_pc);
1929 __ mr(ex_oop, R3_RET); // save return value containing the exception oop
1930 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc);
1931 __ mtlr(R3_RET); // the exception handler
1932 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame
1933
1934 // Continue at exception handler
1935 // See OptoRuntime::generate_exception_blob for register arguments
1936 __ mr(R3_ARG1, ex_oop); // pass exception oop
1937 __ mr(R4_ARG2, ex_pc); // pass exception pc
1938 __ blr();
1939
1940 // static stub for the call above
1941 CodeBuffer* cbuf = masm->code_section()->outer();
1942 stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, call_pc);
1943 guarantee(stub != nullptr, "no space for static stub");
1944 }
1945
1946 static void gen_continuation_yield(MacroAssembler* masm,
1947 const VMRegPair* regs,
1948 OopMapSet* oop_maps,
1949 int& frame_complete,
1950 int& framesize_words,
1951 int& compiled_entry_offset) {
1952 Register tmp = R10_ARG8;
1953
1954 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes);
1955 framesize_words = framesize_bytes / wordSize;
1956
1957 address start = __ pc();
1958 compiled_entry_offset = __ pc() - start;
1959
1960 // Save return pc and push entry frame
1961 __ mflr(tmp);
1962 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc
1963 __ push_frame(framesize_bytes, R0); // SP -= framesize_bytes
1964
1965 DEBUG_ONLY(__ block_comment("Frame Complete"));
1966 frame_complete = __ pc() - start;
1967 address last_java_pc = __ pc();
1968
1969 // This nop must be exactly at the PC we push into the frame info.
1970 // We use this nop for fast CodeBlob lookup; associate the OopMap
1971 // with it right away.
1972 __ post_call_nop();
1973 OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1);
1974 oop_maps->add_gc_map(last_java_pc - start, map);
1975
1976 __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated
1977 __ set_last_Java_frame(R1_SP, tmp);
1978 __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP);
1979 __ reset_last_Java_frame();
1980
1981 Label L_pinned;
1982
1983 __ cmpwi(CCR0, R3_RET, 0);
1984 __ bne(CCR0, L_pinned);
1985
1986 // yield succeeded
1987
1988 // Pop frames of continuation including this stub's frame
1989 __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1990 // The frame pushed by gen_continuation_enter is on top now again
1991 continuation_enter_cleanup(masm);
1992
1993 // Pop frame and return
1994 Label L_return;
1995 __ bind(L_return);
1996 __ pop_frame();
1997 __ ld(R0, _abi0(lr), R1_SP); // Return pc
1998 __ mtlr(R0);
1999 __ blr();
2000
2001 // yield failed - continuation is pinned
2002
2003 __ bind(L_pinned);
2004
2005 // handle pending exception thrown by freeze
2006 __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
2007 __ cmpdi(CCR0, tmp, 0);
2008 __ beq(CCR0, L_return); // return if no exception is pending
2009 __ pop_frame();
2010 __ ld(R0, _abi0(lr), R1_SP); // Return pc
2011 __ mtlr(R0);
2012 __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0);
2013 __ mtctr(tmp);
2014 __ bctr();
2015 }
2016
2017 // ---------------------------------------------------------------------------
2018 // Generate a native wrapper for a given method. The method takes arguments
2019 // in the Java compiled code convention, marshals them to the native
2020 // convention (handlizes oops, etc), transitions to native, makes the call,
2021 // returns to java state (possibly blocking), unhandlizes any result and
2022 // returns.
2023 //
2024 // Critical native functions are a shorthand for the use of
2025 // GetPrimitiveArrayCritical and disallow the use of any other JNI
2026 // functions. The wrapper is expected to unpack the arguments before
2027 // passing them to the callee. Critical native functions leave the state _in_Java,
2028 // since they cannot stop for GC.
2029 // Some other parts of JNI setup are skipped, like the teardown of the JNI handle
2030 // block and the check for pending exceptions, because it's impossible for them
2031 // to be thrown.
2032 //
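// Rough outline of the wrapper generated below for the common (non-intrinsic) case
// (a sketch, not a contract):
//   UEP: inline cache check (instance methods only)
//   VEP: stack bang, push frame, nmethod entry barrier,
//        shuffle/handlize arguments, add JNIEnv* (and the class mirror if static),
//        lock if synchronized, transition to _thread_in_native,
//        call native_func,
//        transition via _thread_in_native_trans (safepoint poll) back to _thread_in_Java,
//        reguard stack pages if needed, unlock if synchronized,
//        resolve a jobject result, reset the handle block, check for pending exceptions,
//        pop the frame and return (or forward the exception).
//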
2033 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
2034 const methodHandle& method,
2035 int compile_id,
2036 BasicType *in_sig_bt,
2037 VMRegPair *in_regs,
2038 BasicType ret_type) {
2039 if (method->is_continuation_native_intrinsic()) {
2040 int exception_offset = -1;
2041 OopMapSet* oop_maps = new OopMapSet();
2042 int frame_complete = -1;
2043 int stack_slots = -1;
2044 int interpreted_entry_offset = -1;
2045 int vep_offset = -1;
2046 if (method->is_continuation_enter_intrinsic()) {
2047 gen_continuation_enter(masm,
2048 in_regs,
2049 exception_offset,
2050 oop_maps,
2051 frame_complete,
2052 stack_slots,
2053 interpreted_entry_offset,
2054 vep_offset);
2055 } else if (method->is_continuation_yield_intrinsic()) {
2056 gen_continuation_yield(masm,
2057 in_regs,
2058 oop_maps,
2059 frame_complete,
2060 stack_slots,
2061 vep_offset);
2062 } else {
2063 guarantee(false, "Unknown Continuation native intrinsic");
2064 }
2065
2066 #ifdef ASSERT
2067 if (method->is_continuation_enter_intrinsic()) {
2068 assert(interpreted_entry_offset != -1, "Must be set");
2069 assert(exception_offset != -1, "Must be set");
2070 } else {
2071 assert(interpreted_entry_offset == -1, "Must be unset");
2072 assert(exception_offset == -1, "Must be unset");
2073 }
2074 assert(frame_complete != -1, "Must be set");
2075 assert(stack_slots != -1, "Must be set");
2076 assert(vep_offset != -1, "Must be set");
2077 #endif
2078
2079 __ flush();
2080 nmethod* nm = nmethod::new_native_nmethod(method,
2081 compile_id,
2082 masm->code(),
2083 vep_offset,
2084 frame_complete,
2085 stack_slots,
2086 in_ByteSize(-1),
2087 in_ByteSize(-1),
2088 oop_maps,
2089 exception_offset);
2090 if (method->is_continuation_enter_intrinsic()) {
2091 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
2092 } else if (method->is_continuation_yield_intrinsic()) {
2093 _cont_doYield_stub = nm;
2094 }
2095 return nm;
2096 }
2097
2098 if (method->is_method_handle_intrinsic()) {
2099 vmIntrinsics::ID iid = method->intrinsic_id();
2100 intptr_t start = (intptr_t)__ pc();
2101 int vep_offset = ((intptr_t)__ pc()) - start;
2102 gen_special_dispatch(masm,
2103 method,
2104 in_sig_bt,
2105 in_regs);
2106 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
2107 __ flush();
2108 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
2109 return nmethod::new_native_nmethod(method,
2110 compile_id,
2111 masm->code(),
2112 vep_offset,
2113 frame_complete,
2114 stack_slots / VMRegImpl::slots_per_word,
2115 in_ByteSize(-1),
2116 in_ByteSize(-1),
2117 (OopMapSet*)nullptr);
2118 }
2119
2120 address native_func = method->native_function();
2121 assert(native_func != nullptr, "must have function");
2122
2123 // First, create signature for outgoing C call
2124 // --------------------------------------------------------------------------
2125
2126 int total_in_args = method->size_of_parameters();
2127 // We have received a description of where all the java args are located
2128 // on entry to the wrapper. We need to convert these args to where
2129 // the jni function will expect them. To figure out where they go
2130 // we convert the java signature to a C signature by inserting
2131 // the hidden arguments as arg[0] and possibly arg[1] (static method)
2132
2133 // Calculate the total number of C arguments and create arrays for the
2134 // signature and the outgoing registers.
2135 // On ppc64, we have two arrays for the outgoing registers, because
2136 // some floating-point arguments must be passed in registers _and_
2137 // in stack locations.
2138 bool method_is_static = method->is_static();
2139 int total_c_args = total_in_args + (method_is_static ? 2 : 1);
2140
2141 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2142 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2143 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2144 BasicType* in_elem_bt = nullptr;
2145
2146 // Create the signature for the C call:
2147 // 1) add the JNIEnv*
2148 // 2) add the class if the method is static
2149 // 3) copy the rest of the incoming signature (shifted by the number of
2150 // hidden arguments).
2151
2152 int argc = 0;
2153 out_sig_bt[argc++] = T_ADDRESS;
2154 if (method->is_static()) {
2155 out_sig_bt[argc++] = T_OBJECT;
2156 }
2157
2158 for (int i = 0; i < total_in_args ; i++ ) {
2159 out_sig_bt[argc++] = in_sig_bt[i];
2160 }
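// Illustrative example: for a static native method "int f(int x)" this produces
// out_sig_bt = { T_ADDRESS /* JNIEnv* */, T_OBJECT /* class mirror */, T_INT /* x */ },
// i.e. total_c_args == total_in_args + 2. For an instance method only the JNIEnv* is
// prepended; the receiver is already part of the incoming signature.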
2161
2162
2163 // Compute the wrapper's frame size.
2164 // --------------------------------------------------------------------------
2165
2166 // Now figure out where the args must be stored and how much stack space
2167 // they require.
2168 //
2169 // Compute framesize for the wrapper. We need to handlize all oops in
2170 // incoming registers.
2171 //
2172 // Calculate the total number of stack slots we will need:
2173 // 1) abi requirements
2174 // 2) outgoing arguments
2175 // 3) space for inbound oop handle area
2176 // 4) space for handlizing a klass if static method
2177 // 5) space for a lock if synchronized method
2178 // 6) workspace for saving return values, int <-> float reg moves, etc.
2179 // 7) alignment
2180 //
2181 // Layout of the native wrapper frame:
2182 // (stack grows upwards, memory grows downwards)
2183 //
2184 // NW [ABI_REG_ARGS] <-- 1) R1_SP
2185 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
2186 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset
2187 // klass <-- 4) R1_SP + klass_offset
2188 // lock <-- 5) R1_SP + lock_offset
2189 // [workspace] <-- 6) R1_SP + workspace_offset
2190 // [alignment] (optional) <-- 7)
2191 // caller [JIT_TOP_ABI_48] <-- r_callers_sp
2192 //
2193 // - *_slot_offset Indicates offset from SP in number of stack slots.
2194 // - *_offset Indicates offset from SP in bytes.
2195
2196 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) + // 1+2)
2197 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.
2198
2199 // Now the space for the inbound oop handle area.
2200 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
2201
2202 int oop_handle_slot_offset = stack_slots;
2203 stack_slots += total_save_slots; // 3)
2204
2205 int klass_slot_offset = 0;
2206 int klass_offset = -1;
2207 if (method_is_static) { // 4)
2208 klass_slot_offset = stack_slots;
2209 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2210 stack_slots += VMRegImpl::slots_per_word;
2211 }
2212
2213 int lock_slot_offset = 0;
2214 int lock_offset = -1;
2215 if (method->is_synchronized()) { // 5)
2216 lock_slot_offset = stack_slots;
2217 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size;
2218 stack_slots += VMRegImpl::slots_per_word;
2219 }
2220
2221 int workspace_slot_offset = stack_slots; // 6)
2222 stack_slots += 2;
2223
2224 // Now compute actual number of stack words we need.
2225 // Round up to keep the stack properly aligned.
2226 stack_slots = align_up(stack_slots, // 7)
2227 frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
2228 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
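// Illustrative example (assuming 8 java integer argument registers and
// VMRegImpl::slots_per_word == 2): a static synchronized native method adds, on top
// of 1)+2), 16 slots for the oop handle area, 2 slots for the handlized klass,
// 2 slots for the lock and 2 workspace slots, and the sum is then rounded up to a
// multiple of 4 slots (16-byte frame alignment).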
2229
2230
2231 // Now we can start generating code.
2232 // --------------------------------------------------------------------------
2233
2234 intptr_t start_pc = (intptr_t)__ pc();
2235 intptr_t vep_start_pc;
2236 intptr_t frame_done_pc;
2237 intptr_t oopmap_pc;
2238
2239 Label ic_miss;
2240 Label handle_pending_exception;
2241
2242 Register r_callers_sp = R21;
2243 Register r_temp_1 = R22;
2244 Register r_temp_2 = R23;
2245 Register r_temp_3 = R24;
2246 Register r_temp_4 = R25;
2247 Register r_temp_5 = R26;
2248 Register r_temp_6 = R27;
2249 Register r_return_pc = R28;
2250
2251 Register r_carg1_jnienv = noreg;
2252 Register r_carg2_classorobject = noreg;
2253 r_carg1_jnienv = out_regs[0].first()->as_Register();
2254 r_carg2_classorobject = out_regs[1].first()->as_Register();
2255
2256
2257 // Generate the Unverified Entry Point (UEP).
2258 // --------------------------------------------------------------------------
2259 assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2260
2261 // Check ic: object class == cached class?
2262 if (!method_is_static) {
2263 Register ic = R19_inline_cache_reg;
2264 Register receiver_klass = r_temp_1;
2265
2266 __ cmpdi(CCR0, R3_ARG1, 0);
2267 __ beq(CCR0, ic_miss);
2268 __ verify_oop(R3_ARG1, FILE_AND_LINE);
2269 __ load_klass(receiver_klass, R3_ARG1);
2270
2271 __ cmpd(CCR0, receiver_klass, ic);
2272 __ bne(CCR0, ic_miss);
2273 }
2274
2275
2276 // Generate the Verified Entry Point (VEP).
2277 // --------------------------------------------------------------------------
2278 vep_start_pc = (intptr_t)__ pc();
2279
2280 if (UseRTMLocking) {
2281 // Abort RTM transaction before calling JNI
2282 // because critical section can be large and
2283 // abort anyway. Also nmethod can be deoptimized.
2284 __ tabort_();
2285 }
2286
2287 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
2288 Label L_skip_barrier;
2289 Register klass = r_temp_1;
2290 // Notify OOP recorder (don't need the relocation)
2291 AddressLiteral md = __ constant_metadata_address(method->method_holder());
2292 __ load_const_optimized(klass, md.value(), R0);
2293 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
2294
2295 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
2296 __ mtctr(klass);
2297 __ bctr();
2298
2299 __ bind(L_skip_barrier);
2300 }
2301
2302 __ save_LR_CR(r_temp_1);
2303 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2304 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
2305 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
2306
2307 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
2308 bs->nmethod_entry_barrier(masm, r_temp_1);
2309
2310 frame_done_pc = (intptr_t)__ pc();
2311
2312 // Native nmethod wrappers never take possession of the oop arguments.
2313 // So the caller will gc the arguments.
2314 // The only thing we need an oopMap for is if the call is static.
2315 //
2316 // An OopMap for lock (and class if static), and one for the VM call itself.
2317 OopMapSet *oop_maps = new OopMapSet();
2318 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2319
2320 // Move arguments from register/stack to register/stack.
2321 // --------------------------------------------------------------------------
2322 //
2323 // We immediately shuffle the arguments so that for any vm call we have
2324 // to make from here on out (sync slow path, jvmti, etc.) we will have
2325 // captured the oops from our caller and have a valid oopMap for them.
2326 //
2327 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2328 // (derived from JavaThread* which is in R16_thread) and, if static,
2329 // the class mirror instead of a receiver. This pretty much guarantees that
2330 // register layout will not match. We ignore these extra arguments during
2331 // the shuffle. The shuffle is described by the two calling convention
2332 // vectors we have in our possession. We simply walk the java vector to
2333 // get the source locations and the c vector to get the destinations.
2334
2335 // Record sp-based slot for receiver on stack for non-static methods.
2336 int receiver_offset = -1;
2337
2338 // We move the arguments backwards because a floating point argument's
2339 // destination is always a register with a greater or equal register
2340 // number, or a stack slot.
2341 // in is the index of the incoming Java arguments
2342 // out is the index of the outgoing C arguments
2343
2344 #ifdef ASSERT
2345 bool reg_destroyed[Register::number_of_registers];
2346 bool freg_destroyed[FloatRegister::number_of_registers];
2347 for (int r = 0 ; r < Register::number_of_registers ; r++) {
2348 reg_destroyed[r] = false;
2349 }
2350 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) {
2351 freg_destroyed[f] = false;
2352 }
2353 #endif // ASSERT
2354
2355 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
2356
2357 #ifdef ASSERT
2358 if (in_regs[in].first()->is_Register()) {
2359 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
2360 } else if (in_regs[in].first()->is_FloatRegister()) {
2361 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
2362 }
2363 if (out_regs[out].first()->is_Register()) {
2364 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
2365 } else if (out_regs[out].first()->is_FloatRegister()) {
2366 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
2367 }
2368 if (out_regs2[out].first()->is_Register()) {
2369 reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
2370 } else if (out_regs2[out].first()->is_FloatRegister()) {
2371 freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
2372 }
2373 #endif // ASSERT
2374
2375 switch (in_sig_bt[in]) {
2376 case T_BOOLEAN:
2377 case T_CHAR:
2378 case T_BYTE:
2379 case T_SHORT:
2380 case T_INT:
2381 // Move int and do sign extension.
2382 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2383 break;
2384 case T_LONG:
2385 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2386 break;
2387 case T_ARRAY:
2388 case T_OBJECT:
2389 object_move(masm, stack_slots,
2390 oop_map, oop_handle_slot_offset,
2391 ((in == 0) && (!method_is_static)), &receiver_offset,
2392 in_regs[in], out_regs[out],
2393 r_callers_sp, r_temp_1, r_temp_2);
2394 break;
2395 case T_VOID:
2396 break;
2397 case T_FLOAT:
2398 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2399 if (out_regs2[out].first()->is_valid()) {
2400 float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
2401 }
2402 break;
2403 case T_DOUBLE:
2404 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2405 if (out_regs2[out].first()->is_valid()) {
2406 double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
2407 }
2408 break;
2409 case T_ADDRESS:
2410 fatal("found type (T_ADDRESS) in java args");
2411 break;
2412 default:
2413 ShouldNotReachHere();
2414 break;
2415 }
2416 }
2417
2418 // Pre-load a static method's oop into ARG2.
2419 // Used both by locking code and the normal JNI call code.
2420 if (method_is_static) {
2421 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
2422 r_carg2_classorobject);
2423
2424 // Now handlize the static class mirror in carg2. It's known not-null.
2425 __ std(r_carg2_classorobject, klass_offset, R1_SP);
2426 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2427 __ addi(r_carg2_classorobject, R1_SP, klass_offset);
2428 }
2429
2430 // Get JNIEnv* which is first argument to native.
2431 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
2432
2433 // NOTE:
2434 //
2435 // We have all of the arguments setup at this point.
2436 // We MUST NOT touch any outgoing regs from this point on.
2437 // So if we must call out we must push a new frame.
2438
2439 // Get current pc for oopmap, and load it patchable relative to global toc.
2440 oopmap_pc = (intptr_t) __ pc();
2441 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);
2442
2443 // We use the same pc/oopMap repeatedly when we call out.
2444 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
2445
2446 // r_return_pc now has the pc loaded that we will use when we finally call
2447 // to native.
2448
2449 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
2450 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
2451
2452 # if 0
2453 // DTrace method entry
2454 # endif
2455
2456 // Lock a synchronized method.
2457 // --------------------------------------------------------------------------
2458
2459 if (method->is_synchronized()) {
2460 Register r_oop = r_temp_4;
2461 const Register r_box = r_temp_5;
2462 Label done, locked;
2463
2464 // Load the oop for the object or class. r_carg2_classorobject contains
2465 // either the handlized oop from the incoming arguments or the handlized
2466 // class mirror (if the method is static).
2467 __ ld(r_oop, 0, r_carg2_classorobject);
2468
2469 // Get the lock box slot's address.
2470 __ addi(r_box, R1_SP, lock_offset);
2471
2472 // Try fastpath for locking.
2473 // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
2474 __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2475 __ beq(CCR0, locked);
2476
2477 // None of the above fast optimizations worked so we have to get into the
2478 // slow case of monitor enter. Inline a special case of call_VM that
2479 // disallows any pending_exception.
2480
2481 // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
2482 int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes);
2483 __ mr(R11_scratch1, R1_SP);
2484 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
2485
2486 // Do the call.
2487 __ set_last_Java_frame(R11_scratch1, r_return_pc);
2488 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
2489 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
2490 __ reset_last_Java_frame();
2491
2492 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);
2493
2494 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2495 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C");
2496
2497 __ bind(locked);
2498 }
2499
2500 // Use that pc we placed in r_return_pc a while back as the current frame anchor.
2501 __ set_last_Java_frame(R1_SP, r_return_pc);
2502
2503 // Publish thread state
2504 // --------------------------------------------------------------------------
2505
2506 // Transition from _thread_in_Java to _thread_in_native.
2507 __ li(R0, _thread_in_native);
2508 __ release();
2509 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2510 __ stw(R0, thread_(thread_state));
2511
2512
2513 // The JNI call
2514 // --------------------------------------------------------------------------
2515 #if defined(ABI_ELFv2)
2516 __ call_c(native_func, relocInfo::runtime_call_type);
2517 #else
2518 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
2519 __ call_c(fd_native_method, relocInfo::runtime_call_type);
2520 #endif
2521
2522
2523 // Now, we are back from the native code.
2524
2525
2526 // Unpack the native result.
2527 // --------------------------------------------------------------------------
2528
2529 // For int-types, we do any needed sign-extension required.
2530 // Care must be taken that the return values (R3_RET and F1_RET)
2531 // will survive any VM calls for blocking or unlocking.
2532 // An OOP result (handle) is done specially in the slow-path code.
2533
2534 switch (ret_type) {
2535 case T_VOID: break; // Nothing to do!
2536 case T_FLOAT: break; // Got it where we want it (unless slow-path).
2537 case T_DOUBLE: break; // Got it where we want it (unless slow-path).
2538 case T_LONG: break; // Got it where we want it (unless slow-path).
2539 case T_OBJECT: break; // Really a handle.
2540 // Cannot de-handlize until after reclaiming jvm_lock.
2541 case T_ARRAY: break;
2542
2543 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1)
2544 Label skip_modify;
2545 __ cmpwi(CCR0, R3_RET, 0);
2546 __ beq(CCR0, skip_modify);
2547 __ li(R3_RET, 1);
2548 __ bind(skip_modify);
2549 break;
2550 }
2551 case T_BYTE: { // sign extension
2552 __ extsb(R3_RET, R3_RET);
2553 break;
2554 }
2555 case T_CHAR: { // unsigned result
2556 __ andi(R3_RET, R3_RET, 0xffff);
2557 break;
2558 }
2559 case T_SHORT: { // sign extension
2560 __ extsh(R3_RET, R3_RET);
2561 break;
2562 }
2563 case T_INT: // nothing to do
2564 break;
2565 default:
2566 ShouldNotReachHere();
2567 break;
2568 }
2569
2570 Label after_transition;
2571
2572 // Publish thread state
2573 // --------------------------------------------------------------------------
2574
2575 // Switch thread to "native transition" state before reading the
2576 // synchronization state. This additional state is necessary because reading
2577 // and testing the synchronization state is not atomic w.r.t. GC, as this
2578 // scenario demonstrates:
2579 // - Java thread A, in _thread_in_native state, loads _not_synchronized
2580 // and is preempted.
2581 // - VM thread changes sync state to synchronizing and suspends threads
2582 // for GC.
2583 // - Thread A is resumed to finish this native method, but doesn't block
2584 // here since it didn't see any synchronization in progress, and escapes.
2585
2586 // Transition from _thread_in_native to _thread_in_native_trans.
2587 __ li(R0, _thread_in_native_trans);
2588 __ release();
2589 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2590 __ stw(R0, thread_(thread_state));
2591
2592
2593 // Must we block?
2594 // --------------------------------------------------------------------------
2595
2596 // Block, if necessary, before resuming in _thread_in_Java state.
2597 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2598 {
2599 Label no_block, sync;
2600
2601 // Force this write out before the read below.
2602 if (!UseSystemMemoryBarrier) {
2603 __ fence();
2604 }
2605
2606 Register sync_state_addr = r_temp_4;
2607 Register sync_state = r_temp_5;
2608 Register suspend_flags = r_temp_6;
2609
2610 // No synchronization in progress nor yet synchronized
2611 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
2612 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */);
2613
2614 // Not suspended.
2615 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
2616 __ lwz(suspend_flags, thread_(suspend_flags));
2617 __ cmpwi(CCR1, suspend_flags, 0);
2618 __ beq(CCR1, no_block);
2619
2620 // Block. Save any potential method result value before the operation and
2621 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2622 // lets us share the oopMap we used when we went native rather than create
2623 // a distinct one for this pc.
2624 __ bind(sync);
2625 __ isync();
2626
2627 address entry_point =
2628 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
2629 save_native_result(masm, ret_type, workspace_slot_offset);
2630 __ call_VM_leaf(entry_point, R16_thread);
2631 restore_native_result(masm, ret_type, workspace_slot_offset);
2632
2633 __ bind(no_block);
2634
2635 // Publish thread state.
2636 // --------------------------------------------------------------------------
2637
2638 // Thread state is thread_in_native_trans. Any safepoint blocking has
2639 // already happened so we can now change state to _thread_in_Java.
2640
2641 // Transition from _thread_in_native_trans to _thread_in_Java.
2642 __ li(R0, _thread_in_Java);
2643 __ lwsync(); // Acquire safepoint and suspend state, release thread state.
2644 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2645 __ stw(R0, thread_(thread_state));
2646 __ bind(after_transition);
2647 }
2648
2649 // Reguard any pages if necessary.
2650 // --------------------------------------------------------------------------
2651
2652 Label no_reguard;
2653 __ lwz(r_temp_1, thread_(stack_guard_state));
2654 __ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
2655 __ bne(CCR0, no_reguard);
2656
2657 save_native_result(masm, ret_type, workspace_slot_offset);
2658 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2659 restore_native_result(masm, ret_type, workspace_slot_offset);
2660
2661 __ bind(no_reguard);
2662
2663
2664 // Unlock
2665 // --------------------------------------------------------------------------
2666
2667 if (method->is_synchronized()) {
2668 const Register r_oop = r_temp_4;
2669 const Register r_box = r_temp_5;
2670 const Register r_exception = r_temp_6;
2671 Label done;
2672
2673 // Get oop and address of lock object box.
2674 if (method_is_static) {
2675 assert(klass_offset != -1, "");
2676 __ ld(r_oop, klass_offset, R1_SP);
2677 } else {
2678 assert(receiver_offset != -1, "");
2679 __ ld(r_oop, receiver_offset, R1_SP);
2680 }
2681 __ addi(r_box, R1_SP, lock_offset);
2682
2683 // Try fastpath for unlocking.
2684 __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2685 __ beq(CCR0, done);
2686
2687 // Save and restore any potential method result value around the unlocking operation.
2688 save_native_result(masm, ret_type, workspace_slot_offset);
2689
2690 // Must save pending exception around the slow-path VM call. Since it's a
2691 // leaf call, the pending exception (if any) can be kept in a register.
2692 __ ld(r_exception, thread_(pending_exception));
2693 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2694 __ li(R0, 0);
2695 __ std(R0, thread_(pending_exception));
2696
2697 // Slow case of monitor exit.
2698 // Inline a special case of call_VM that disallows any pending_exception.
2699 // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
2700 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
2701
2702 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2703 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");
2704
2705 restore_native_result(masm, ret_type, workspace_slot_offset);
2706
2707 // Check_forward_pending_exception jumps to forward_exception if any pending
2708 // exception is set. The forward_exception routine expects to see the
2709 // exception in pending_exception and not in a register. Kind of clumsy,
2710 // since everyone who branches to forward_exception must have tested
2711 // pending_exception first and hence already has it in a register.
2712 __ std(r_exception, thread_(pending_exception));
2713
2714 __ bind(done);
2715 }
2716
2717 # if 0
2718 // DTrace method exit
2719 # endif
2720
2721 // Clear "last Java frame" SP and PC.
2722 // --------------------------------------------------------------------------
2723
2724 __ reset_last_Java_frame();
2725
2726 // Unbox oop result, e.g. JNIHandles::resolve value.
2727 // --------------------------------------------------------------------------
2728
2729 if (is_reference_type(ret_type)) {
2730 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE);
2731 }
2732
2733 if (CheckJNICalls) {
2734 // clear_pending_jni_exception_check
2735 __ load_const_optimized(R0, 0L);
2736 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
2737 }
2738
2739 // Reset handle block.
2740 // --------------------------------------------------------------------------
2741 __ ld(r_temp_1, thread_(active_handles));
2742 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
2743 __ li(r_temp_2, 0);
2744 __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);
2745
2746
2747 // Check for pending exceptions.
2748 // --------------------------------------------------------------------------
2749 __ ld(r_temp_2, thread_(pending_exception));
2750 __ cmpdi(CCR0, r_temp_2, 0);
2751 __ bne(CCR0, handle_pending_exception);
2752
2753 // Return
2754 // --------------------------------------------------------------------------
2755
2756 __ pop_frame();
2757 __ restore_LR_CR(R11);
2758 __ blr();
2759
2760
2761 // Handler for pending exceptions (out-of-line).
2762 // --------------------------------------------------------------------------
2763 // Since this is a native call, we know the proper exception handler
2764 // is the empty function. We just pop this frame and then jump to
2765 // forward_exception_entry.
2766 __ bind(handle_pending_exception);
2767
2768 __ pop_frame();
2769 __ restore_LR_CR(R11);
2770 __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2771 relocInfo::runtime_call_type);
2772
2773 // Handler for a cache miss (out-of-line).
2774 // --------------------------------------------------------------------------
2775
2776 if (!method_is_static) {
2777 __ bind(ic_miss);
2778
2779 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
2780 relocInfo::runtime_call_type);
2781 }
2782
2783 // Done.
2784 // --------------------------------------------------------------------------
2785
2786 __ flush();
2787
2788 nmethod *nm = nmethod::new_native_nmethod(method,
2789 compile_id,
2790 masm->code(),
2791 vep_start_pc-start_pc,
2792 frame_done_pc-start_pc,
2793 stack_slots / VMRegImpl::slots_per_word,
2794 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2795 in_ByteSize(lock_offset),
2796 oop_maps);
2797
2798 return nm;
2799 }
2800
2801 // This function returns the adjustment size (in number of words) to a c2i adapter
2802 // activation for use during deoptimization.
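// Illustrative example (assuming Interpreter::stackElementWords == 1 and a frame
// alignment of 2 words on ppc64): callee_parameters == 2 and callee_locals == 5
// give (5 - 2) * 1 = 3, aligned up to 4 words of adjustment.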
2803 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2804 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words);
2805 }
2806
2807 uint SharedRuntime::in_preserve_stack_slots() {
2808 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size;
2809 }
2810
2811 uint SharedRuntime::out_preserve_stack_slots() {
2812 #if defined(COMPILER1) || defined(COMPILER2)
2813 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2814 #else
2815 return 0;
2816 #endif
2817 }
2818
2819 #if defined(COMPILER1) || defined(COMPILER2)
2820 // Frame generation for deopt and uncommon trap blobs.
2821 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2822 /* Read */
2823 Register unroll_block_reg,
2824 /* Update */
2825 Register frame_sizes_reg,
2826 Register number_of_frames_reg,
2827 Register pcs_reg,
2828 /* Invalidate */
2829 Register frame_size_reg,
2830 Register pc_reg) {
2831
2832 __ ld(pc_reg, 0, pcs_reg);
2833 __ ld(frame_size_reg, 0, frame_sizes_reg);
2834 __ std(pc_reg, _abi0(lr), R1_SP);
2835 __ push_frame(frame_size_reg, R0/*tmp*/);
2836 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
2837 __ addi(number_of_frames_reg, number_of_frames_reg, -1);
2838 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
2839 __ addi(pcs_reg, pcs_reg, wordSize);
2840 }
2841
2842 // Loop through the UnrollBlock info and create new frames.
2843 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
2844 /* read */
2845 Register unroll_block_reg,
2846 /* invalidate */
2847 Register frame_sizes_reg,
2848 Register number_of_frames_reg,
2849 Register pcs_reg,
2850 Register frame_size_reg,
2851 Register pc_reg) {
2852 Label loop;
2853
2854 // _number_of_frames is of type int (deoptimization.hpp)
2855 __ lwa(number_of_frames_reg,
2856 in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()),
2857 unroll_block_reg);
2858 __ ld(pcs_reg,
2859 in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()),
2860 unroll_block_reg);
2861 __ ld(frame_sizes_reg,
2862 in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()),
2863 unroll_block_reg);
2864
2865 // stack: (caller_of_deoptee, ...).
2866
2867 // At this point we either have an interpreter frame or a compiled
2868 // frame on top of the stack. If it is a compiled frame, we push a new c2i
2869 // adapter here.
2870
2871 // Memorize top-frame stack-pointer.
2872 __ mr(frame_size_reg/*old_sp*/, R1_SP);
2873
2874 // Resize interpreter top frame OR C2I adapter.
2875
2876 // At this moment, the top frame (which is the caller of the deoptee) is
2877 // an interpreter frame or a newly pushed C2I adapter or an entry frame.
2878 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
2879 // outgoing arguments.
2880 //
2881 // In order to push the interpreter frame for the deoptee, we need to
2882 // resize the top frame such that we are able to place the deoptee's
2883 // locals in the frame.
2884 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
2885 // into a valid PARENT_IJAVA_FRAME_ABI.
2886
2887 __ lwa(R11_scratch1,
2888 in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()),
2889 unroll_block_reg);
2890 __ neg(R11_scratch1, R11_scratch1);
2891
2892 // R11_scratch1 contains size of locals for frame resizing.
2893 // R12_scratch2 contains top frame's lr.
2894
2895 // Resizing the frame by the complete frame size prevents the TOC from being
2896 // overwritten by locals. A more stack-space-saving way would be
2897 // to copy the TOC to its location in the new ABI.
2898 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
2899
2900 // now, resize the frame
2901 __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
2902
2903 // In the case where we have resized a c2i frame above, the optional
2904 // alignment below the locals has size 32 (why?).
2905 __ std(R12_scratch2, _abi0(lr), R1_SP);
2906
2907 // Initialize initial_caller_sp.
2908 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
2909
2910 #ifdef ASSERT
2911 // Make sure that there is at least one entry in the array.
2912 __ cmpdi(CCR0, number_of_frames_reg, 0);
2913 __ asm_assert_ne("array_size must be > 0");
2914 #endif
2915
2916 // Now push the new interpreter frames.
2917 //
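// Illustrative example: deoptimizing a compiled frame that inlined two Java methods
// yields an UnrollBlock with number_of_frames == 2; the loop below pushes one skeletal
// frame per entry of frame_sizes/frame_pcs, and Deoptimization::unpack_frames (called
// afterwards by this blob) fills in their contents.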
2918 __ bind(loop);
2919 // Allocate a new frame, fill in the pc.
2920 push_skeleton_frame(masm, deopt,
2921 unroll_block_reg,
2922 frame_sizes_reg,
2923 number_of_frames_reg,
2924 pcs_reg,
2925 frame_size_reg,
2926 pc_reg);
2927 __ cmpdi(CCR0, number_of_frames_reg, 0);
2928 __ bne(CCR0, loop);
2929
2930 // Get the return address pointing into the frame manager.
2931 __ ld(R0, 0, pcs_reg);
2932 // Store it in the top interpreter frame.
2933 __ std(R0, _abi0(lr), R1_SP);
2934 // Initialize frame_manager_lr of interpreter top frame.
2935 }
2936 #endif
2937
2938 void SharedRuntime::generate_deopt_blob() {
2939 // Allocate space for the code
2940 ResourceMark rm;
2941 // Setup code generation tools
2942 CodeBuffer buffer("deopt_blob", 2048, 1024);
2943 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
2944 Label exec_mode_initialized;
2945 int frame_size_in_words;
2946 OopMap* map = nullptr;
2947 OopMapSet *oop_maps = new OopMapSet();
2948
2949 // Size of the native ABI reg args area plus spill slots for R3_RET and F1_RET.
2950 const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size;
2951 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
2952 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
2953
2954 const Register exec_mode_reg = R21_tmp1;
2955
2956 const address start = __ pc();
2957
2958 #if defined(COMPILER1) || defined(COMPILER2)
2959 // --------------------------------------------------------------------------
2960 // Prolog for non exception case!
2961
2962 // We have been called from the deopt handler of the deoptee.
2963 //
2964 // deoptee:
2965 // ...
2966 // call X
2967 // ...
2968 // deopt_handler: call_deopt_stub
2969 // cur. return pc --> ...
2970 //
2971 // So currently SR_LR points behind the call in the deopt handler.
2972 // We adjust it such that it points to the start of the deopt handler.
2973 // The return_pc has been stored in the frame of the deoptee and
2974 // will replace the address of the deopt_handler in the call
2975 // to Deoptimization::fetch_unroll_info below.
2976 // We can't grab a free register here, because all registers may
2977 // contain live values, so let the RegisterSaver do the adjustment
2978 // of the return pc.
2979 const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
2980
2981 // Push the "unpack frame"
2982 // Save everything in sight.
2983 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2984 &first_frame_size_in_bytes,
2985 /*generate_oop_map=*/ true,
2986 return_pc_adjustment_no_exception,
2987 RegisterSaver::return_pc_is_lr);
2988 assert(map != nullptr, "OopMap must have been created");
2989
2990 __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
2991 // Save exec mode for unpack_frames.
2992 __ b(exec_mode_initialized);
2993
2994 // --------------------------------------------------------------------------
2995 // Prolog for exception case
2996
2997 // An exception is pending.
2998 // We have been called with a return (interpreter) or a jump (exception blob).
2999 //
3000 // - R3_ARG1: exception oop
3001 // - R4_ARG2: exception pc
3002
3003 int exception_offset = __ pc() - start;
3004
3005 BLOCK_COMMENT("Prolog for exception case");
3006
3007 // Store exception oop and pc in thread (location known to GC).
3008 // This is needed since the call to "fetch_unroll_info()" may safepoint.
3009 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
3010 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
3011 __ std(R4_ARG2, _abi0(lr), R1_SP);
3012
3013 // Vanilla deoptimization with an exception pending in exception_oop.
3014 int exception_in_tls_offset = __ pc() - start;
3015
3016 // Push the "unpack frame".
3017 // Save everything in sight.
3018 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3019 &first_frame_size_in_bytes,
3020 /*generate_oop_map=*/ false,
3021 /*return_pc_adjustment_exception=*/ 0,
3022 RegisterSaver::return_pc_is_pre_saved);
3023
3024 // Deopt during an exception. Save exec mode for unpack_frames.
3025 __ li(exec_mode_reg, Deoptimization::Unpack_exception);
3026
3027 // fall through
3028
3029 int reexecute_offset = 0;
3030 #ifdef COMPILER1
3031 __ b(exec_mode_initialized);
3032
3033 // Reexecute entry, similar to c2 uncommon trap
3034 reexecute_offset = __ pc() - start;
3035
3036 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3037 &first_frame_size_in_bytes,
3038 /*generate_oop_map=*/ false,
3039 /*return_pc_adjustment_reexecute=*/ 0,
3040 RegisterSaver::return_pc_is_pre_saved);
3041 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
3042 #endif
3043
3044 // --------------------------------------------------------------------------
3045 __ BIND(exec_mode_initialized);
3046
3047 const Register unroll_block_reg = R22_tmp2;
3048
3049 // We need to set `last_Java_frame' because `fetch_unroll_info' will
3050 // call `last_Java_frame()'. The value of the pc in the frame is not
3051 // particularly important. It just needs to identify this blob.
3052 __ set_last_Java_frame(R1_SP, noreg);
3053
3054 // With EscapeAnalysis turned on, this call may safepoint!
3055 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg);
3056 address calls_return_pc = __ last_calls_return_pc();
3057 // Set an oopmap for the call site that describes all our saved registers.
3058 oop_maps->add_gc_map(calls_return_pc - start, map);
3059
3060 __ reset_last_Java_frame();
3061 // Save the return value.
3062 __ mr(unroll_block_reg, R3_RET);
3063
3064 // Restore only the result registers that have been saved
3065 // by push_frame_reg_args_and_save_live_registers(...).
3066 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
3067
3068 // reload the exec mode from the UnrollBlock (it might have changed)
3069 __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
3070 // In excp_deopt_mode, restore and clear exception oop which we
3071 // stored in the thread during exception entry above. The exception
3072 // oop will be the return value of this stub.
3073 Label skip_restore_excp;
3074 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
3075 __ bne(CCR0, skip_restore_excp);
3076 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
3077 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
3078 __ li(R0, 0);
3079 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
3080 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
3081 __ BIND(skip_restore_excp);
3082
3083 __ pop_frame();
3084
3085 // stack: (deoptee, optional i2c, caller of deoptee, ...).
3086
3087 // pop the deoptee's frame
3088 __ pop_frame();
3089
3090 // stack: (caller_of_deoptee, ...).
3091
  // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
  // If the caller is not compiled, the loaded value equals the current SP (see frame::initial_deoptimization_info())
  // and the frame is effectively not resized.
3095 Register caller_sp = R23_tmp3;
3096 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
3097 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
3098
3099 // Loop through the `UnrollBlock' info and create interpreter frames.
3100 push_skeleton_frames(masm, true/*deopt*/,
3101 unroll_block_reg,
3102 R23_tmp3,
3103 R24_tmp4,
3104 R25_tmp5,
3105 R26_tmp6,
3106 R27_tmp7);
3107
3108 // stack: (skeletal interpreter frame, ..., optional skeletal
3109 // interpreter frame, optional c2i, caller of deoptee, ...).
3110
3111 // push an `unpack_frame' taking care of float / int return values.
3112 __ push_frame(frame_size_in_bytes, R0/*tmp*/);
3113
3114 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3115 // skeletal interpreter frame, optional c2i, caller of deoptee,
3116 // ...).
3117
3118 // Spill live volatile registers since we'll do a call.
3119 __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP);
3120 __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
3121
  // Let the unpacker lay out information in the skeletal frames just
  // allocated.
3124 __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true);
3125 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
3126 // This is a call to a LEAF method, so no oop map is required.
3127 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3128 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
3129 __ reset_last_Java_frame();
3130
3131 // Restore the volatiles saved above.
3132 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP);
3133 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
3134
3135 // Pop the unpack frame.
3136 __ pop_frame();
3137 __ restore_LR_CR(R0);
3138
3139 // stack: (top interpreter frame, ..., optional interpreter frame,
3140 // optional c2i, caller of deoptee, ...).
3141
  // Restore the interpreter state registers.
3143 __ restore_interpreter_state(R11_scratch1);
3144 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3145
3146 // Return to the interpreter entry point.
3147 __ blr();
3148 __ flush();
3149 #else // COMPILER2
3150 __ unimplemented("deopt blob needed only with compiler");
3151 int exception_offset = __ pc() - start;
3152 #endif // COMPILER2
3153
3154 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
3155 reexecute_offset, first_frame_size_in_bytes / wordSize);
3156 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3157 }
3158
3159 #ifdef COMPILER2
3160 void SharedRuntime::generate_uncommon_trap_blob() {
3161 // Allocate space for the code.
3162 ResourceMark rm;
3163 // Setup code generation tools.
3164 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3165 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
3166 address start = __ pc();
3167
3168 if (UseRTMLocking) {
3169 // Abort RTM transaction before possible nmethod deoptimization.
3170 __ tabort_();
3171 }
3172
3173 Register unroll_block_reg = R21_tmp1;
3174 Register klass_index_reg = R22_tmp2;
3175 Register unc_trap_reg = R23_tmp3;
3176 Register r_return_pc = R27_tmp7;
3177
3178 OopMapSet* oop_maps = new OopMapSet();
3179 int frame_size_in_bytes = frame::native_abi_reg_args_size;
3180 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
3181
3182 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3183
3184 // Push a dummy `unpack_frame' and call
3185 // `Deoptimization::uncommon_trap' to pack the compiled frame into a
3186 // vframe array and return the `UnrollBlock' information.
3187
3188 // Save LR to compiled frame.
3189 __ save_LR_CR(R11_scratch1);
3190
3191 // Push an "uncommon_trap" frame.
3192 __ push_frame_reg_args(0, R11_scratch1);
3193
3194 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
3195
3196 // Set the `unpack_frame' as last_Java_frame.
3197 // `Deoptimization::uncommon_trap' expects it and considers its
3198 // sender frame as the deoptee frame.
  // Remember the offset of the instruction whose address will be
  // computed into r_return_pc.
3201 address gc_map_pc = __ pc();
3202 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true);
3203 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);
3204
3205 __ mr(klass_index_reg, R3);
3206 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap);
3207 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
3208 R16_thread, klass_index_reg, R5_ARG3);
3209
3210 // Set an oopmap for the call site.
3211 oop_maps->add_gc_map(gc_map_pc - start, map);
3212
3213 __ reset_last_Java_frame();
3214
3215 // Pop the `unpack frame'.
3216 __ pop_frame();
3217
3218 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3219
3220 // Save the return value.
3221 __ mr(unroll_block_reg, R3_RET);
3222
3223 // Pop the uncommon_trap frame.
3224 __ pop_frame();
3225
3226 // stack: (caller_of_deoptee, ...).
3227
3228 #ifdef ASSERT
3229 __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
3230 __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ asm_assert_eq("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
3232 #endif
3233
  // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
  // If the caller is not compiled, the loaded value equals the current SP (see frame::initial_deoptimization_info())
  // and the frame is effectively not resized.
3237 Register caller_sp = R23_tmp3;
3238 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
3239 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
3240
3241 // Allocate new interpreter frame(s) and possibly a c2i adapter
3242 // frame.
3243 push_skeleton_frames(masm, false/*deopt*/,
3244 unroll_block_reg,
3245 R22_tmp2,
3246 R23_tmp3,
3247 R24_tmp4,
3248 R25_tmp5,
3249 R26_tmp6);
3250
3251 // stack: (skeletal interpreter frame, ..., optional skeletal
3252 // interpreter frame, optional c2i, caller of deoptee, ...).
3253
  // Push a dummy `unpack_frame' taking care of float return values.
  // Call `Deoptimization::unpack_frames' to lay out information in the
  // interpreter frames just created.
3257
3258 // Push a simple "unpack frame" here.
3259 __ push_frame_reg_args(0, R11_scratch1);
3260
3261 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3262 // skeletal interpreter frame, optional c2i, caller of deoptee,
3263 // ...).
3264
3265 // Set the "unpack_frame" as last_Java_frame.
3266 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);
3267
3268 // Indicate it is the uncommon trap case.
3269 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
  // Let the unpacker lay out information in the skeletal frames just
  // allocated.
3272 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3273 R16_thread, unc_trap_reg);
3274
3275 __ reset_last_Java_frame();
3276 // Pop the `unpack frame'.
3277 __ pop_frame();
3278 // Restore LR from top interpreter frame.
3279 __ restore_LR_CR(R11_scratch1);
3280
3281 // stack: (top interpreter frame, ..., optional interpreter frame,
3282 // optional c2i, caller of deoptee, ...).
3283
3284 __ restore_interpreter_state(R11_scratch1);
3285 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3286
3287 // Return to the interpreter entry point.
3288 __ blr();
3289
3290 masm->flush();
3291
3292 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
3293 }
3294 #endif // COMPILER2
3295
// Generate a special Compile2Runtime blob that saves all registers and sets up the oopmap.
3297 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3298 assert(StubRoutines::forward_exception_entry() != nullptr,
3299 "must be generated before");
3300
3301 ResourceMark rm;
3302 OopMapSet *oop_maps = new OopMapSet();
3303 OopMap* map;
3304
3305 // Allocate space for the code. Setup code generation tools.
3306 CodeBuffer buffer("handler_blob", 2048, 1024);
3307 MacroAssembler* masm = new MacroAssembler(&buffer);
3308
3309 address start = __ pc();
3310 int frame_size_in_bytes = 0;
3311
3312 RegisterSaver::ReturnPCLocation return_pc_location;
3313 bool cause_return = (poll_type == POLL_AT_RETURN);
3314 if (cause_return) {
3315 // Nothing to do here. The frame has already been popped in MachEpilogNode.
3316 // Register LR already contains the return pc.
3317 return_pc_location = RegisterSaver::return_pc_is_pre_saved;
3318 } else {
3319 // Use thread()->saved_exception_pc() as return pc.
3320 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3321 }
3322
3323 if (UseRTMLocking) {
3324 // Abort RTM transaction before calling runtime
3325 // because critical section can be large and so
3326 // will abort anyway. Also nmethod can be deoptimized.
3327 __ tabort_();
3328 }
3329
3330 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3331
3332 // Save registers, fpu state, and flags. Set R31 = return pc.
3333 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3334 &frame_size_in_bytes,
3335 /*generate_oop_map=*/ true,
3336 /*return_pc_adjustment=*/0,
3337 return_pc_location, save_vectors);
3338
3339 // The following is basically a call_VM. However, we need the precise
3340 // address of the call in order to generate an oopmap. Hence, we do all the
3341 // work ourselves.
3342 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3343
3344 // The return address must always be correct so that the frame constructor
3345 // never sees an invalid pc.
3346
3347 // Do the call
3348 __ call_VM_leaf(call_ptr, R16_thread);
3349 address calls_return_pc = __ last_calls_return_pc();
3350
3351 // Set an oopmap for the call site. This oopmap will map all
3352 // oop-registers and debug-info registers as callee-saved. This
3353 // will allow deoptimization at this safepoint to find all possible
3354 // debug-info recordings, as well as let GC find all oops.
3355 oop_maps->add_gc_map(calls_return_pc - start, map);
3356
3357 Label noException;
3358
3359 // Clear the last Java frame.
3360 __ reset_last_Java_frame();
3361
3362 BLOCK_COMMENT(" Check pending exception.");
3363 const Register pending_exception = R0;
3364 __ ld(pending_exception, thread_(pending_exception));
3365 __ cmpdi(CCR0, pending_exception, 0);
3366 __ beq(CCR0, noException);
3367
3368 // Exception pending
3369 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3370 frame_size_in_bytes,
3371 /*restore_ctr=*/true, save_vectors);
3372
3373 BLOCK_COMMENT(" Jump to forward_exception_entry.");
3374 // Jump to forward_exception_entry, with the issuing PC in LR
3375 // so it looks like the original nmethod called forward_exception_entry.
3376 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3377
3378 // No exception case.
3379 __ BIND(noException);
3380
3381 if (!cause_return) {
3382 Label no_adjust;
    // If our stashed return pc was modified by the runtime, we avoid touching it.
3384 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP);
3385 __ cmpd(CCR0, R0, R31);
3386 __ bne(CCR0, no_adjust);
3387
3388 // Adjust return pc forward to step over the safepoint poll instruction
3389 __ addi(R31, R31, 4);
3390 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
3391
3392 __ bind(no_adjust);
3393 }
3394
3395 // Normal exit, restore registers and exit.
3396 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3397 frame_size_in_bytes,
3398 /*restore_ctr=*/true, save_vectors);
3399
3400 __ blr();
3401
3402 // Make sure all code is generated
3403 masm->flush();
3404
3405 // Fill-out other meta info
3406 // CodeBlob frame size is in words.
3407 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
3408 }
3409
3410 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3411 //
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point, but since
// this is generic code we don't know what they are, so the caller must do any
// GC of the args.
3416 //
3417 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3418
3419 // allocate space for the code
3420 ResourceMark rm;
3421
3422 CodeBuffer buffer(name, 1000, 512);
3423 MacroAssembler* masm = new MacroAssembler(&buffer);
3424
3425 int frame_size_in_bytes;
3426
3427 OopMapSet *oop_maps = new OopMapSet();
3428 OopMap* map = nullptr;
3429
3430 address start = __ pc();
3431
3432 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3433 &frame_size_in_bytes,
3434 /*generate_oop_map*/ true,
3435 /*return_pc_adjustment*/ 0,
3436 RegisterSaver::return_pc_is_lr);
3437
3438 // Use noreg as last_Java_pc, the return pc will be reconstructed
3439 // from the physical frame.
3440 __ set_last_Java_frame(/*sp*/R1_SP, noreg);
3441
3442 int frame_complete = __ offset();
3443
3444 // Pass R19_method as 2nd (optional) argument, used by
3445 // counter_overflow_stub.
3446 __ call_VM_leaf(destination, R16_thread, R19_method);
3447 address calls_return_pc = __ last_calls_return_pc();
3448 // Set an oopmap for the call site.
3449 // We need this not only for callee-saved registers, but also for volatile
3450 // registers that the compiler might be keeping live across a safepoint.
3451 // Create the oopmap for the call's return pc.
3452 oop_maps->add_gc_map(calls_return_pc - start, map);
3453
3454 // R3_RET contains the address we are going to jump to assuming no exception got installed.
3455
3456 // clear last_Java_sp
3457 __ reset_last_Java_frame();
3458
3459 // Check for pending exceptions.
3460 BLOCK_COMMENT("Check for pending exceptions.");
3461 Label pending;
3462 __ ld(R11_scratch1, thread_(pending_exception));
3463 __ cmpdi(CCR0, R11_scratch1, 0);
3464 __ bne(CCR0, pending);
3465
3466 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
3467
3468 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
3469
3470 // Get the returned method.
3471 __ get_vm_result_2(R19_method);
3472
3473 __ bctr();
3474
3475
3476 // Pending exception after the safepoint.
3477 __ BIND(pending);
3478
3479 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);
3480
3481 // exception pending => remove activation and forward to exception handler
3482
3483 __ li(R11_scratch1, 0);
3484 __ ld(R3_ARG1, thread_(pending_exception));
3485 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
3486 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3487
3488 // -------------
3489 // Make sure all code is generated.
3490 masm->flush();
3491
  // Return the blob.
  // CodeBlob frame size is in words.
3494 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
3495 oop_maps, true);
3496 }
3497
3498
3499 //------------------------------Montgomery multiplication------------------------
3500 //
3501
3502 // Subtract 0:b from carry:a. Return carry.
3503 static unsigned long
3504 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3505 long i = 0;
3506 unsigned long tmp, tmp2;
3507 __asm__ __volatile__ (
3508 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA
3509 "mtctr %[len] \n"
3510 "0: \n"
3511 "ldx %[tmp], %[i], %[a] \n"
3512 "ldx %[tmp2], %[i], %[b] \n"
3513 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended
3514 "stdx %[tmp], %[i], %[a] \n"
3515 "addi %[i], %[i], 8 \n"
3516 "bdnz 0b \n"
3517 "addme %[tmp], %[carry] \n" // carry + CA - 1
3518 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
3519 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
3520 : "ctr", "xer", "memory"
3521 );
3522 return tmp;
3523 }
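
// For reference only: a portable sketch of what sub() above computes, assuming
// a 64-bit unsigned long. The real implementation keeps the borrow in XER[CA]
// instead of tracking it explicitly:
//
//   unsigned long borrow = 0;
//   for (long i = 0; i < len; i++) {
//     unsigned long ai = a[i], bi = b[i], di = ai - bi;
//     a[i] = di - borrow;
//     borrow = (ai < bi) | (di < borrow);
//   }
//   return carry - borrow;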
3524
3525 // Multiply (unsigned) Long A by Long B, accumulating the double-
3526 // length result into the accumulator formed of T0, T1, and T2.
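// In other words, treating (T2:T1:T0) as a single 192-bit value:
//   (T2:T1:T0) += (the 128-bit unsigned product) A * B.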
3527 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
3528 unsigned long hi, lo;
3529 __asm__ __volatile__ (
3530 "mulld %[lo], %[A], %[B] \n"
3531 "mulhdu %[hi], %[A], %[B] \n"
3532 "addc %[T0], %[T0], %[lo] \n"
3533 "adde %[T1], %[T1], %[hi] \n"
3534 "addze %[T2], %[T2] \n"
3535 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
3536 : [A]"r"(A), [B]"r"(B)
3537 : "xer"
3538 );
3539 }
3540
3541 // As above, but add twice the double-length result into the
3542 // accumulator.
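// That is, (T2:T1:T0) += 2 * A * B, implemented by adding the same hi/lo
// product into the accumulator twice through the carry chain.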
3543 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
3544 unsigned long hi, lo;
3545 __asm__ __volatile__ (
3546 "mulld %[lo], %[A], %[B] \n"
3547 "mulhdu %[hi], %[A], %[B] \n"
3548 "addc %[T0], %[T0], %[lo] \n"
3549 "adde %[T1], %[T1], %[hi] \n"
3550 "addze %[T2], %[T2] \n"
3551 "addc %[T0], %[T0], %[lo] \n"
3552 "adde %[T1], %[T1], %[hi] \n"
3553 "addze %[T2], %[T2] \n"
3554 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
3555 : [A]"r"(A), [B]"r"(B)
3556 : "xer"
3557 );
3558 }
3559
3560 // Fast Montgomery multiplication. The derivation of the algorithm is
3561 // in "A Cryptographic Library for the Motorola DSP56000,
3562 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237".
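// Sketch of the idea (informal, not taken from the paper): with
// R = 2^(64*len) and inv == -n[0]^-1 mod 2^64 (see the assert below), each
// outer iteration picks m[i] = t0 * inv so that the running sum a*b + m*n
// becomes divisible by 2^64. After len iterations the upper half of that sum
// is (a*b + m*n) / R, which is congruent to a * b * R^-1 (mod n); the
// trailing sub() loop folds any remaining carry word back in by subtracting n.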
3563 static void
3564 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
3565 unsigned long m[], unsigned long inv, int len) {
3566 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3567 int i;
3568
3569 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3570
3571 for (i = 0; i < len; i++) {
3572 int j;
3573 for (j = 0; j < i; j++) {
3574 MACC(a[j], b[i-j], t0, t1, t2);
3575 MACC(m[j], n[i-j], t0, t1, t2);
3576 }
3577 MACC(a[i], b[0], t0, t1, t2);
3578 m[i] = t0 * inv;
3579 MACC(m[i], n[0], t0, t1, t2);
3580
3581 assert(t0 == 0, "broken Montgomery multiply");
3582
3583 t0 = t1; t1 = t2; t2 = 0;
3584 }
3585
3586 for (i = len; i < 2*len; i++) {
3587 int j;
3588 for (j = i-len+1; j < len; j++) {
3589 MACC(a[j], b[i-j], t0, t1, t2);
3590 MACC(m[j], n[i-j], t0, t1, t2);
3591 }
3592 m[i-len] = t0;
3593 t0 = t1; t1 = t2; t2 = 0;
3594 }
3595
3596 while (t0) {
3597 t0 = sub(m, n, t0, len);
3598 }
3599 }
3600
3601 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3602 // multiplies so it should be up to 25% faster than Montgomery
3603 // multiplication. However, its loop control is more complex and it
3604 // may actually run slower on some machines.
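// The saving comes from symmetry: for j != i-j the cross product a[j]*a[i-j]
// appears twice in the square, so it is computed once and added twice via
// MACC2, while the diagonal term a[j]*a[j] (the "(i & 1) == 0" case) is added
// once. The m*n reduction still needs the full number of multiplies, which is
// why the overall saving is about 25% rather than 50%.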
3605 static void
3606 montgomery_square(unsigned long a[], unsigned long n[],
3607 unsigned long m[], unsigned long inv, int len) {
3608 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3609 int i;
3610
3611 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3612
3613 for (i = 0; i < len; i++) {
3614 int j;
3615 int end = (i+1)/2;
3616 for (j = 0; j < end; j++) {
3617 MACC2(a[j], a[i-j], t0, t1, t2);
3618 MACC(m[j], n[i-j], t0, t1, t2);
3619 }
3620 if ((i & 1) == 0) {
3621 MACC(a[j], a[j], t0, t1, t2);
3622 }
3623 for (; j < i; j++) {
3624 MACC(m[j], n[i-j], t0, t1, t2);
3625 }
3626 m[i] = t0 * inv;
3627 MACC(m[i], n[0], t0, t1, t2);
3628
3629 assert(t0 == 0, "broken Montgomery square");
3630
3631 t0 = t1; t1 = t2; t2 = 0;
3632 }
3633
3634 for (i = len; i < 2*len; i++) {
3635 int start = i-len+1;
3636 int end = start + (len - start)/2;
3637 int j;
3638 for (j = start; j < end; j++) {
3639 MACC2(a[j], a[i-j], t0, t1, t2);
3640 MACC(m[j], n[i-j], t0, t1, t2);
3641 }
3642 if ((i & 1) == 0) {
3643 MACC(a[j], a[j], t0, t1, t2);
3644 }
3645 for (; j < len; j++) {
3646 MACC(m[j], n[i-j], t0, t1, t2);
3647 }
3648 m[i-len] = t0;
3649 t0 = t1; t1 = t2; t2 = 0;
3650 }
3651
3652 while (t0) {
3653 t0 = sub(m, n, t0, len);
3654 }
3655 }
3656
// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
// It doesn't seem to be relevant for Power8, so we use the same value.
3660 #define MONTGOMERY_SQUARING_THRESHOLD 64
3661
3662 // Copy len longwords from s to d, word-swapping as we go. The
3663 // destination array is reversed.
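// Illustrative example (little-endian, len == 2):
//   s = { 0x1111111122222222, 0x3333333344444444 }
// becomes
//   d = { 0x4444444433333333, 0x2222222211111111 }.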
3664 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
3665 d += len;
3666 while(len-- > 0) {
3667 d--;
3668 unsigned long s_val = *s;
3669 // Swap words in a longword on little endian machines.
3670 #ifdef VM_LITTLE_ENDIAN
3671 s_val = (s_val << 32) | (s_val >> 32);
3672 #endif
3673 *d = s_val;
3674 s++;
3675 }
3676 }
3677
3678 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3679 jint len, jlong inv,
3680 jint *m_ints) {
3681 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3682 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3683 int longwords = len/2;
3684
  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints correspond to a 16384-bit integer and will use
  // here a total of 8k bytes of stack space (4 scratch arrays of up to
  // 256 unsigned longs each).
3688 int divisor = sizeof(unsigned long) * 4;
3689 guarantee(longwords <= 8192 / divisor, "must be");
3690 int total_allocation = longwords * sizeof (unsigned long) * 4;
3691 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3692
3693 // Local scratch arrays
3694 unsigned long
3695 *a = scratch + 0 * longwords,
3696 *b = scratch + 1 * longwords,
3697 *n = scratch + 2 * longwords,
3698 *m = scratch + 3 * longwords;
3699
3700 reverse_words((unsigned long *)a_ints, a, longwords);
3701 reverse_words((unsigned long *)b_ints, b, longwords);
3702 reverse_words((unsigned long *)n_ints, n, longwords);
3703
3704 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
3705
3706 reverse_words(m, (unsigned long *)m_ints, longwords);
3707 }
3708
3709 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3710 jint len, jlong inv,
3711 jint *m_ints) {
3712 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3713 assert(len % 2 == 0, "array length in montgomery_square must be even");
3714 int longwords = len/2;
3715
  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints correspond to a 16384-bit integer and will use
  // here a total of 6k bytes of stack space (3 scratch arrays of up to
  // 256 unsigned longs each).
3719 int divisor = sizeof(unsigned long) * 3;
3720 guarantee(longwords <= (8192 / divisor), "must be");
3721 int total_allocation = longwords * sizeof (unsigned long) * 3;
3722 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3723
3724 // Local scratch arrays
3725 unsigned long
3726 *a = scratch + 0 * longwords,
3727 *n = scratch + 1 * longwords,
3728 *m = scratch + 2 * longwords;
3729
3730 reverse_words((unsigned long *)a_ints, a, longwords);
3731 reverse_words((unsigned long *)n_ints, n, longwords);
3732
3733 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3734 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
3735 } else {
3736 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
3737 }
3738
3739 reverse_words(m, (unsigned long *)m_ints, longwords);
3740 }